From fe64a72f42cee21ec8a6d685176f3d9ff5001b46 Mon Sep 17 00:00:00 2001 From: fkm3 Date: Thu, 19 Oct 2017 23:56:38 -0400 Subject: [PATCH] Update haddock (#162) --- docs/haddock/doc-index-124.html | 4 - docs/haddock/doc-index-47.html | 4 +- docs/haddock/doc-index-58.html | 4 +- docs/haddock/doc-index-92.html | 4 +- docs/haddock/doc-index-95.html | 4 +- docs/haddock/doc-index-A.html | 4 +- docs/haddock/doc-index-All.html | 4 +- docs/haddock/doc-index-B.html | 4 +- docs/haddock/doc-index-C.html | 4 +- docs/haddock/doc-index-D.html | 4 +- docs/haddock/doc-index-E.html | 4 +- docs/haddock/doc-index-F.html | 4 +- docs/haddock/doc-index-G.html | 4 +- docs/haddock/doc-index-H.html | 4 +- docs/haddock/doc-index-I.html | 4 +- docs/haddock/doc-index-J.html | 4 + docs/haddock/doc-index-K.html | 4 +- docs/haddock/doc-index-L.html | 4 +- docs/haddock/doc-index-M.html | 4 +- docs/haddock/doc-index-N.html | 4 +- docs/haddock/doc-index-O.html | 4 +- docs/haddock/doc-index-P.html | 4 +- docs/haddock/doc-index-Q.html | 4 +- docs/haddock/doc-index-R.html | 4 +- docs/haddock/doc-index-S.html | 4 +- docs/haddock/doc-index-T.html | 4 +- docs/haddock/doc-index-U.html | 4 +- docs/haddock/doc-index-V.html | 4 +- docs/haddock/doc-index-W.html | 4 +- docs/haddock/doc-index-Z.html | 4 +- docs/haddock/doc-index.html | 4 +- docs/haddock/frames.html | 30 - docs/haddock/haddock-util.js | 50 +- docs/haddock/index-frames.html | 4 - docs/haddock/index.html | 4 +- docs/haddock/ocean.css | 68 +- .../tensorflow-0.1.0.0/TensorFlow-Build.html | 14 - .../TensorFlow-BuildOp.html | 6 - .../TensorFlow-ControlFlow.html | 6 - .../tensorflow-0.1.0.0/TensorFlow-Core.html | 49 - .../TensorFlow-Internal-FFI.html | 8 - .../TensorFlow-Internal-VarInt.html | 4 - .../tensorflow-0.1.0.0/TensorFlow-Nodes.html | 6 - .../tensorflow-0.1.0.0/TensorFlow-Output.html | 13 - .../TensorFlow-Session.html | 19 - .../tensorflow-0.1.0.0/TensorFlow-Tensor.html | 25 - .../tensorflow-0.1.0.0/TensorFlow-Types.html | 12 - 
.../tensorflow-0.1.0.0/doc-index-124.html | 4 - .../tensorflow-0.1.0.0/doc-index-All.html | 4 - .../tensorflow-0.1.0.0/doc-index-R.html | 4 - docs/haddock/tensorflow-0.1.0.0/frames.html | 30 - .../tensorflow-0.1.0.0/haddock-util.js | 344 - .../tensorflow-0.1.0.0/index-frames.html | 4 - docs/haddock/tensorflow-0.1.0.0/index.html | 4 - .../mini_TensorFlow-Core.html | 4 - .../mini_TensorFlow-Session.html | 4 - .../mini_TensorFlow-Tensor.html | 4 - .../mini_TensorFlow-Types.html | 4 - docs/haddock/tensorflow-0.1.0.0/ocean.css | 600 - .../haddock/tensorflow-0.1.0.0/tensorflow.txt | 942 - docs/haddock/tensorflow-0.1.0.2/LICENSE | 203 + .../tensorflow-0.1.0.2/TensorFlow-Build.html | 14 + .../TensorFlow-BuildOp.html | 6 + .../TensorFlow-ControlFlow.html | 6 + .../tensorflow-0.1.0.2/TensorFlow-Core.html | 50 + .../TensorFlow-Internal-FFI.html | 8 + .../TensorFlow-Internal-VarInt.html | 4 + .../tensorflow-0.1.0.2/TensorFlow-Nodes.html | 6 + .../tensorflow-0.1.0.2/TensorFlow-Output.html | 12 + .../TensorFlow-Session.html | 19 + .../tensorflow-0.1.0.2/TensorFlow-Tensor.html | 25 + .../tensorflow-0.1.0.2/TensorFlow-Types.html | 12 + .../doc-index-47.html | 4 +- .../doc-index-58.html | 4 +- .../doc-index-92.html | 4 +- .../doc-index-95.html | 4 +- .../doc-index-A.html | 4 +- .../tensorflow-0.1.0.2/doc-index-All.html | 4 + .../doc-index-B.html | 4 +- .../doc-index-C.html | 4 +- .../doc-index-D.html | 4 +- .../doc-index-E.html | 4 +- .../doc-index-F.html | 4 +- .../doc-index-G.html | 4 +- .../doc-index-H.html | 4 +- .../doc-index-I.html | 4 +- .../doc-index-L.html | 4 +- .../doc-index-M.html | 4 +- .../doc-index-N.html | 4 +- .../doc-index-O.html | 4 +- .../doc-index-P.html | 4 +- .../tensorflow-0.1.0.2/doc-index-R.html | 4 + .../doc-index-S.html | 4 +- .../doc-index-T.html | 4 +- .../doc-index-U.html | 4 +- .../doc-index-V.html | 4 +- .../doc-index-W.html | 4 +- .../doc-index.html | 4 +- .../haddock-util.js | 50 +- .../hslogo-16.png | Bin 
docs/haddock/tensorflow-0.1.0.2/index.html | 9 + .../mini_TensorFlow-Build.html | 2 +- .../mini_TensorFlow-BuildOp.html | 2 +- .../mini_TensorFlow-ControlFlow.html | 2 +- .../mini_TensorFlow-Core.html | 4 + .../mini_TensorFlow-Internal-FFI.html | 2 +- .../mini_TensorFlow-Internal-VarInt.html | 2 +- .../mini_TensorFlow-Nodes.html | 2 +- .../mini_TensorFlow-Output.html | 4 +- .../mini_TensorFlow-Session.html | 4 + .../mini_TensorFlow-Tensor.html | 4 + .../mini_TensorFlow-Types.html | 4 + .../minus.gif | Bin .../ocean.css | 68 +- .../plus.gif | Bin .../src/TensorFlow.Build.html | 339 + .../src/TensorFlow.BuildOp.html | 307 + .../src/TensorFlow.ControlFlow.html | 51 + .../src/TensorFlow.Core.html | 93 + .../src/TensorFlow.Internal.FFI.html | 267 + .../src/TensorFlow.Internal.Raw.html | 512 + .../src/TensorFlow.Internal.VarInt.html | 51 + .../src/TensorFlow.Nodes.html | 147 + .../src/TensorFlow.Output.html | 128 + .../src/TensorFlow.Session.html | 218 + .../src/TensorFlow.Tensor.html | 201 + .../src/TensorFlow.Types.html | 566 + .../tensorflow-0.1.0.2/src/highlight.js | 27 + docs/haddock/tensorflow-0.1.0.2/src/style.css | 55 + .../synopsis.png | Bin .../tensorflow-core-ops-0.1.0.0/LICENSE | 203 + .../TensorFlow-GenOps-Core.html | 3422 +- .../doc-index-95.html | 4 +- .../doc-index-A.html | 4 +- .../doc-index-All.html | 4 +- .../doc-index-B.html | 4 +- .../doc-index-C.html | 4 +- .../doc-index-D.html | 4 +- .../doc-index-E.html | 4 +- .../doc-index-F.html | 4 +- .../doc-index-G.html | 4 +- .../doc-index-H.html | 4 +- .../doc-index-I.html | 4 +- .../doc-index-L.html | 4 +- .../doc-index-M.html | 4 +- .../doc-index-N.html | 4 +- .../doc-index-O.html | 4 +- .../doc-index-P.html | 4 +- .../doc-index-Q.html | 4 +- .../doc-index-R.html | 4 +- .../doc-index-S.html | 4 +- .../doc-index-T.html | 4 +- .../doc-index-U.html | 4 +- .../doc-index-V.html | 4 +- .../doc-index-W.html | 4 +- .../doc-index-Z.html | 4 +- .../doc-index.html | 4 +- .../tensorflow-core-ops-0.1.0.0/frames.html | 
30 - .../haddock-util.js | 50 +- .../index-frames.html | 4 - .../tensorflow-core-ops-0.1.0.0/index.html | 4 +- .../mini_TensorFlow-GenOps-Core.html | 4 +- .../tensorflow-core-ops-0.1.0.0/ocean.css | 68 +- .../src/TensorFlow.GenOps.Core.html | 48997 ++++++++++++++++ .../src/highlight.js | 27 + .../tensorflow-core-ops-0.1.0.0/src/style.css | 55 + .../tensorflow-core-ops.txt | 7082 --- .../tensorflow-logging-0.1.0.0/LICENSE | 203 + .../TensorFlow-Logging.html | 10 +- .../tensorflow-logging-0.1.0.0/doc-index.html | 4 +- .../tensorflow-logging-0.1.0.0/frames.html | 30 - .../haddock-util.js | 50 +- .../index-frames.html | 4 - .../tensorflow-logging-0.1.0.0/index.html | 4 +- .../mini_TensorFlow-Logging.html | 4 +- .../tensorflow-logging-0.1.0.0/ocean.css | 68 +- .../src/TensorFlow.Logging.html | 170 + .../src/highlight.js | 27 + .../tensorflow-logging-0.1.0.0/src/style.css | 55 + .../tensorflow-logging.txt | 61 - docs/haddock/tensorflow-mnist-0.1.0.0/LICENSE | 203 + .../TensorFlow-Examples-MNIST-Parse.html | 4 +- ...ensorFlow-Examples-MNIST-TrainedGraph.html | 4 +- .../tensorflow-mnist-0.1.0.0/doc-index.html | 4 +- .../tensorflow-mnist-0.1.0.0/frames.html | 30 - .../tensorflow-mnist-0.1.0.0/haddock-util.js | 50 +- .../index-frames.html | 4 - .../tensorflow-mnist-0.1.0.0/index.html | 4 +- .../mini_TensorFlow-Examples-MNIST-Parse.html | 2 +- ...ensorFlow-Examples-MNIST-TrainedGraph.html | 2 +- .../tensorflow-mnist-0.1.0.0/ocean.css | 68 +- .../src/Paths_tensorflow_mnist.html | 51 + .../src/TensorFlow.Examples.MNIST.Parse.html | 97 + ...ensorFlow.Examples.MNIST.TrainedGraph.html | 31 + .../tensorflow-mnist-0.1.0.0/src/highlight.js | 27 + .../tensorflow-mnist-0.1.0.0/src/style.css | 55 + .../tensorflow-mnist.txt | 41 - .../LICENSE | 203 + .../TensorFlow-Examples-MNIST-InputData.html | 4 +- .../doc-index.html | 4 +- .../frames.html | 30 - .../haddock-util.js | 50 +- .../index-frames.html | 4 - .../index.html | 4 +- ...i_TensorFlow-Examples-MNIST-InputData.html | 2 +- 
.../ocean.css | 68 +- .../Paths_tensorflow_mnist_input_data.html | 51 + .../TensorFlow.Examples.MNIST.InputData.html | 32 + .../src/highlight.js | 27 + .../src/style.css | 55 + .../tensorflow-mnist-input-data.txt | 19 - .../tensorflow-nn-0.1.0.0/TensorFlow-NN.html | 15 - .../tensorflow-nn-0.1.0.0/doc-index.html | 4 - .../haddock/tensorflow-nn-0.1.0.0/frames.html | 30 - .../tensorflow-nn-0.1.0.0/haddock-util.js | 344 - .../tensorflow-nn-0.1.0.0/hslogo-16.png | Bin 1684 -> 0 bytes .../tensorflow-nn-0.1.0.0/index-frames.html | 4 - docs/haddock/tensorflow-nn-0.1.0.0/index.html | 4 - docs/haddock/tensorflow-nn-0.1.0.0/minus.gif | Bin 56 -> 0 bytes docs/haddock/tensorflow-nn-0.1.0.0/ocean.css | 600 - docs/haddock/tensorflow-nn-0.1.0.0/plus.gif | Bin 59 -> 0 bytes .../tensorflow-nn-0.1.0.0/synopsis.png | Bin 11327 -> 0 bytes .../tensorflow-nn-0.1.0.0/tensorflow-nn.txt | 40 - docs/haddock/tensorflow-opgen-0.1.0.0/LICENSE | 203 + .../TensorFlow-OpGen-ParsedOp.html | 12 +- .../TensorFlow-OpGen.html | 15 +- .../tensorflow-opgen-0.1.0.0/doc-index.html | 4 +- .../tensorflow-opgen-0.1.0.0/frames.html | 30 - .../tensorflow-opgen-0.1.0.0/haddock-util.js | 50 +- .../index-frames.html | 4 - .../tensorflow-opgen-0.1.0.0/index.html | 4 +- .../mini_TensorFlow-OpGen-ParsedOp.html | 4 +- .../mini_TensorFlow-OpGen.html | 2 +- .../tensorflow-opgen-0.1.0.0/ocean.css | 68 +- .../src/TensorFlow.OpGen.ParsedOp.html | 344 + .../src/TensorFlow.OpGen.html | 453 + .../tensorflow-opgen-0.1.0.0/src/highlight.js | 27 + .../tensorflow-opgen-0.1.0.0/src/style.css | 55 + .../tensorflow-opgen.txt | 161 - docs/haddock/tensorflow-ops-0.1.0.0/LICENSE | 203 + .../TensorFlow-EmbeddingOps.html | 12 +- .../TensorFlow-Gradient.html | 4 +- .../TensorFlow-Minimize.html | 5 + .../tensorflow-ops-0.1.0.0/TensorFlow-NN.html | 15 + .../TensorFlow-Ops.html | 109 +- .../TensorFlow-Queue.html | 8 + .../TensorFlow-Variable.html | 19 + .../tensorflow-ops-0.1.0.0/doc-index.html | 4 +- .../tensorflow-ops-0.1.0.0/frames.html | 
30 - .../tensorflow-ops-0.1.0.0/haddock-util.js | 50 +- .../tensorflow-ops-0.1.0.0/index-frames.html | 4 - .../haddock/tensorflow-ops-0.1.0.0/index.html | 4 +- .../mini_TensorFlow-EmbeddingOps.html | 2 +- .../mini_TensorFlow-Gradient.html | 4 +- .../mini_TensorFlow-Minimize.html | 4 + .../mini_TensorFlow-NN.html | 2 +- .../mini_TensorFlow-Ops.html | 4 +- .../mini_TensorFlow-Queue.html | 2 +- .../mini_TensorFlow-Variable.html | 4 + docs/haddock/tensorflow-ops-0.1.0.0/ocean.css | 68 +- .../src/TensorFlow.EmbeddingOps.html | 92 + .../src/TensorFlow.Gradient.html | 856 + .../src/TensorFlow.Minimize.html | 116 + .../src/TensorFlow.NN.html | 89 + .../src/TensorFlow.Ops.html | 409 + .../src/TensorFlow.Queue.html | 72 + .../src/TensorFlow.Variable.html | 195 + .../tensorflow-ops-0.1.0.0/src/highlight.js | 27 + .../tensorflow-ops-0.1.0.0/src/style.css | 55 + .../tensorflow-ops-0.1.0.0/tensorflow-ops.txt | 491 - docs/haddock/tensorflow-proto-0.1.0.0/LICENSE | 203 + ...Proto-Tensorflow-Core-Example-Example.html | 4 + ...re-Example-ExampleParserConfiguration.html | 4 + ...Proto-Tensorflow-Core-Example-Feature.html | 4 + ...-Core-Framework-AllocationDescription.html | 4 + ...o-Tensorflow-Core-Framework-AttrValue.html | 4 +- ...o-Tensorflow-Core-Framework-CostGraph.html | 4 + ...rflow-Core-Framework-DeviceAttributes.html | 4 + ...to-Tensorflow-Core-Framework-Function.html | 4 + ...Proto-Tensorflow-Core-Framework-Graph.html | 4 +- ...o-Tensorflow-Core-Framework-KernelDef.html | 4 + ...o-Tensorflow-Core-Framework-LogMemory.html | 4 + ...oto-Tensorflow-Core-Framework-NodeDef.html | 4 +- ...Proto-Tensorflow-Core-Framework-OpDef.html | 4 +- ...sorflow-Core-Framework-ResourceHandle.html | 4 +- ...o-Tensorflow-Core-Framework-StepStats.html | 4 + ...oto-Tensorflow-Core-Framework-Summary.html | 4 +- ...roto-Tensorflow-Core-Framework-Tensor.html | 4 +- ...flow-Core-Framework-TensorDescription.html | 4 + ...Tensorflow-Core-Framework-TensorShape.html | 4 +- 
...Tensorflow-Core-Framework-TensorSlice.html | 4 + ...Proto-Tensorflow-Core-Framework-Types.html | 4 +- ...to-Tensorflow-Core-Framework-Variable.html | 4 + ...to-Tensorflow-Core-Framework-Versions.html | 4 + ...o-Tensorflow-Core-Lib-Core-ErrorCodes.html | 4 + ...roto-Tensorflow-Core-Protobuf-Cluster.html | 4 + ...Proto-Tensorflow-Core-Protobuf-Config.html | 4 +- ...-Tensorflow-Core-Protobuf-ControlFlow.html | 4 + .../Proto-Tensorflow-Core-Protobuf-Debug.html | 4 + ...to-Tensorflow-Core-Protobuf-MetaGraph.html | 4 + ...-Tensorflow-Core-Protobuf-NamedTensor.html | 4 + ...-Tensorflow-Core-Protobuf-QueueRunner.html | 4 + ...nsorflow-Core-Protobuf-RewriterConfig.html | 4 + ...o-Tensorflow-Core-Protobuf-SavedModel.html | 4 + .../Proto-Tensorflow-Core-Protobuf-Saver.html | 4 + ...Tensorflow-Core-Protobuf-TensorBundle.html | 4 + ...orflow-Core-Protobuf-TensorflowServer.html | 4 + .../Proto-Tensorflow-Core-Util-Event.html | 4 +- ...sorflow-Core-Util-MemmappedFileSystem.html | 4 + ...Tensorflow-Core-Util-SavedTensorSlice.html | 4 + .../Proto-Tensorflow-Core-Util-TestLog.html | 4 + .../doc-index-95.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-A.html | 4 +- .../doc-index-All.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-B.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-C.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-D.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-E.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-F.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-G.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-H.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-I.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-J.html | 4 + .../tensorflow-proto-0.1.0.0/doc-index-K.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-L.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-M.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-N.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-O.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-P.html | 4 +- 
.../tensorflow-proto-0.1.0.0/doc-index-Q.html | 4 + .../tensorflow-proto-0.1.0.0/doc-index-R.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-S.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-T.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-U.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-V.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index-W.html | 4 +- .../tensorflow-proto-0.1.0.0/doc-index.html | 4 +- .../tensorflow-proto-0.1.0.0/frames.html | 30 - .../tensorflow-proto-0.1.0.0/haddock-util.js | 50 +- .../index-frames.html | 4 - .../tensorflow-proto-0.1.0.0/index.html | 4 +- ...Proto-Tensorflow-Core-Example-Example.html | 4 + ...re-Example-ExampleParserConfiguration.html | 4 + ...Proto-Tensorflow-Core-Example-Feature.html | 4 + ...-Core-Framework-AllocationDescription.html | 4 + ...o-Tensorflow-Core-Framework-AttrValue.html | 4 +- ...o-Tensorflow-Core-Framework-CostGraph.html | 4 + ...rflow-Core-Framework-DeviceAttributes.html | 4 + ...to-Tensorflow-Core-Framework-Function.html | 4 + ...Proto-Tensorflow-Core-Framework-Graph.html | 2 +- ...o-Tensorflow-Core-Framework-KernelDef.html | 4 + ...o-Tensorflow-Core-Framework-LogMemory.html | 4 + ...oto-Tensorflow-Core-Framework-NodeDef.html | 2 +- ...Proto-Tensorflow-Core-Framework-OpDef.html | 2 +- ...sorflow-Core-Framework-ResourceHandle.html | 4 +- ...o-Tensorflow-Core-Framework-StepStats.html | 4 + ...oto-Tensorflow-Core-Framework-Summary.html | 4 +- ...roto-Tensorflow-Core-Framework-Tensor.html | 2 +- ...flow-Core-Framework-TensorDescription.html | 4 + ...Tensorflow-Core-Framework-TensorShape.html | 2 +- ...Tensorflow-Core-Framework-TensorSlice.html | 4 + ...Proto-Tensorflow-Core-Framework-Types.html | 2 +- ...to-Tensorflow-Core-Framework-Variable.html | 4 + ...to-Tensorflow-Core-Framework-Versions.html | 4 + ...o-Tensorflow-Core-Lib-Core-ErrorCodes.html | 4 + ...roto-Tensorflow-Core-Protobuf-Cluster.html | 4 + ...Proto-Tensorflow-Core-Protobuf-Config.html | 4 +- 
...-Tensorflow-Core-Protobuf-ControlFlow.html | 4 + ..._Proto-Tensorflow-Core-Protobuf-Debug.html | 4 + ...to-Tensorflow-Core-Protobuf-MetaGraph.html | 4 + ...-Tensorflow-Core-Protobuf-NamedTensor.html | 4 + ...-Tensorflow-Core-Protobuf-QueueRunner.html | 4 + ...nsorflow-Core-Protobuf-RewriterConfig.html | 4 + ...o-Tensorflow-Core-Protobuf-SavedModel.html | 4 + ..._Proto-Tensorflow-Core-Protobuf-Saver.html | 4 + ...Tensorflow-Core-Protobuf-TensorBundle.html | 4 + ...orflow-Core-Protobuf-TensorflowServer.html | 4 + ...mini_Proto-Tensorflow-Core-Util-Event.html | 4 +- ...sorflow-Core-Util-MemmappedFileSystem.html | 4 + ...Tensorflow-Core-Util-SavedTensorSlice.html | 4 + ...ni_Proto-Tensorflow-Core-Util-TestLog.html | 4 + .../tensorflow-proto-0.1.0.0/ocean.css | 68 +- ...Proto.Tensorflow.Core.Example.Example.html | 214 + ...re.Example.ExampleParserConfiguration.html | 720 + ...Proto.Tensorflow.Core.Example.Feature.html | 598 + ....Core.Framework.AllocationDescription.html | 235 + ...o.Tensorflow.Core.Framework.AttrValue.html | 974 + ...o.Tensorflow.Core.Framework.CostGraph.html | 821 + ...rflow.Core.Framework.DeviceAttributes.html | 282 + ...to.Tensorflow.Core.Framework.Function.html | 491 + ...Proto.Tensorflow.Core.Framework.Graph.html | 206 + ...o.Tensorflow.Core.Framework.KernelDef.html | 284 + ...o.Tensorflow.Core.Framework.LogMemory.html | 777 + ...oto.Tensorflow.Core.Framework.NodeDef.html | 277 + ...Proto.Tensorflow.Core.Framework.OpDef.html | 895 + ...sorflow.Core.Framework.ResourceHandle.html | 193 + ...o.Tensorflow.Core.Framework.StepStats.html | 1024 + ...oto.Tensorflow.Core.Framework.Summary.html | 1299 + ...roto.Tensorflow.Core.Framework.Tensor.html | 457 + ...flow.Core.Framework.TensorDescription.html | 202 + ...Tensorflow.Core.Framework.TensorShape.html | 171 + ...Tensorflow.Core.Framework.TensorSlice.html | 203 + ...Proto.Tensorflow.Core.Framework.Types.html | 346 + ...to.Tensorflow.Core.Framework.Variable.html | 332 + 
...to.Tensorflow.Core.Framework.Versions.html | 122 + ...o.Tensorflow.Core.Lib.Core.ErrorCodes.html | 199 + ...roto.Tensorflow.Core.Protobuf.Cluster.html | 200 + ...Proto.Tensorflow.Core.Protobuf.Config.html | 2048 + ....Tensorflow.Core.Protobuf.ControlFlow.html | 651 + .../Proto.Tensorflow.Core.Protobuf.Debug.html | 273 + ...to.Tensorflow.Core.Protobuf.MetaGraph.html | 1918 + ....Tensorflow.Core.Protobuf.NamedTensor.html | 125 + ....Tensorflow.Core.Protobuf.QueueRunner.html | 198 + ...nsorflow.Core.Protobuf.RewriterConfig.html | 378 + ...o.Tensorflow.Core.Protobuf.SavedModel.html | 107 + .../Proto.Tensorflow.Core.Protobuf.Saver.html | 302 + ...Tensorflow.Core.Protobuf.TensorBundle.html | 457 + ...orflow.Core.Protobuf.TensorflowServer.html | 231 + .../src/Proto.Tensorflow.Core.Util.Event.html | 888 + ...sorflow.Core.Util.MemmappedFileSystem.html | 159 + ...Tensorflow.Core.Util.SavedTensorSlice.html | 517 + .../Proto.Tensorflow.Core.Util.TestLog.html | 2282 + .../tensorflow-proto-0.1.0.0/src/highlight.js | 27 + .../tensorflow-proto-0.1.0.0/src/style.css | 55 + .../tensorflow-proto.txt | 920 - .../TensorFlow-Queue.html | 8 - .../tensorflow-queue-0.1.0.0/doc-index.html | 4 - .../tensorflow-queue-0.1.0.0/frames.html | 30 - .../tensorflow-queue-0.1.0.0/hslogo-16.png | Bin 1684 -> 0 bytes .../index-frames.html | 4 - .../tensorflow-queue-0.1.0.0/index.html | 4 - .../tensorflow-queue-0.1.0.0/minus.gif | Bin 56 -> 0 bytes .../haddock/tensorflow-queue-0.1.0.0/plus.gif | Bin 59 -> 0 bytes .../tensorflow-queue-0.1.0.0/synopsis.png | Bin 11327 -> 0 bytes .../tensorflow-queue.txt | 25 - .../tensorflow-records-0.1.0.0/LICENSE | 203 + .../TensorFlow-Records.html | 6 +- .../tensorflow-records-0.1.0.0/doc-index.html | 4 +- .../tensorflow-records-0.1.0.0/frames.html | 30 - .../haddock-util.js | 50 +- .../index-frames.html | 4 - .../tensorflow-records-0.1.0.0/index.html | 4 +- .../mini_TensorFlow-Records.html | 2 +- .../tensorflow-records-0.1.0.0/ocean.css | 68 +- 
.../src/TensorFlow.CRC32C.html | 62 + .../src/TensorFlow.Records.html | 135 + .../src/highlight.js | 27 + .../tensorflow-records-0.1.0.0/src/style.css | 55 + .../tensorflow-records.txt | 35 - .../LICENSE | 203 + .../TensorFlow-Records-Conduit.html | 4 +- .../doc-index.html | 4 +- .../frames.html | 30 - .../haddock-util.js | 50 +- .../index-frames.html | 4 - .../index.html | 4 +- .../mini_TensorFlow-Records-Conduit.html | 2 +- .../ocean.css | 68 +- .../src/TensorFlow.Records.Conduit.html | 54 + .../src/highlight.js | 27 + .../src/style.css | 55 + .../tensorflow-records-conduit.txt | 25 - docs/haddock/tensorflow-test-0.1.0.0/LICENSE | 203 + .../TensorFlow-Test.html | 6 +- .../tensorflow-test-0.1.0.0/doc-index.html | 4 +- .../tensorflow-test-0.1.0.0/frames.html | 30 - .../tensorflow-test-0.1.0.0/haddock-util.js | 50 +- .../tensorflow-test-0.1.0.0/index-frames.html | 4 - .../tensorflow-test-0.1.0.0/index.html | 5 +- .../mini_TensorFlow-Test.html | 2 +- .../haddock/tensorflow-test-0.1.0.0/ocean.css | 68 +- .../src/TensorFlow.Test.html | 33 + .../tensorflow-test-0.1.0.0/src/highlight.js | 27 + .../tensorflow-test-0.1.0.0/src/style.css | 55 + .../tensorflow-test.txt | 16 - 475 files changed, 83913 insertions(+), 15095 deletions(-) delete mode 100644 docs/haddock/doc-index-124.html create mode 100644 docs/haddock/doc-index-J.html delete mode 100644 docs/haddock/frames.html delete mode 100644 docs/haddock/index-frames.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Core.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html delete mode 100644 
docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/doc-index-124.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/doc-index-All.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/doc-index-R.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/haddock-util.js delete mode 100644 docs/haddock/tensorflow-0.1.0.0/index-frames.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/index.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Core.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html delete mode 100644 docs/haddock/tensorflow-0.1.0.0/ocean.css delete mode 100644 docs/haddock/tensorflow-0.1.0.0/tensorflow.txt create mode 100644 docs/haddock/tensorflow-0.1.0.2/LICENSE create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-Build.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-BuildOp.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-ControlFlow.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-Core.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-Internal-FFI.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-Internal-VarInt.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-Nodes.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-Output.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-Session.html create mode 100644 
docs/haddock/tensorflow-0.1.0.2/TensorFlow-Tensor.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/TensorFlow-Types.html rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-47.html (87%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-58.html (85%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-92.html (85%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-95.html (87%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-A.html (56%) create mode 100644 docs/haddock/tensorflow-0.1.0.2/doc-index-All.html rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-B.html (90%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-C.html (89%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-D.html (96%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-E.html (90%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-F.html (91%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-G.html (88%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-H.html (85%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-I.html (85%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-L.html (87%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-M.html (85%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-N.html (89%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-O.html (94%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-P.html (89%) create mode 100644 docs/haddock/tensorflow-0.1.0.2/doc-index-R.html rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-S.html (58%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-T.html (88%) rename 
docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-U.html (90%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-V.html (88%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index-W.html (90%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/doc-index.html (81%) rename docs/haddock/{tensorflow-queue-0.1.0.0 => tensorflow-0.1.0.2}/haddock-util.js (91%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/hslogo-16.png (100%) create mode 100644 docs/haddock/tensorflow-0.1.0.2/index.html rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/mini_TensorFlow-Build.html (96%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/mini_TensorFlow-BuildOp.html (88%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/mini_TensorFlow-ControlFlow.html (83%) create mode 100644 docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Core.html rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/mini_TensorFlow-Internal-FFI.html (91%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/mini_TensorFlow-Internal-VarInt.html (80%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/mini_TensorFlow-Nodes.html (86%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/mini_TensorFlow-Output.html (88%) create mode 100644 docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Session.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Tensor.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Types.html rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/minus.gif (100%) rename docs/haddock/{tensorflow-queue-0.1.0.0 => tensorflow-0.1.0.2}/ocean.css (92%) rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/plus.gif (100%) create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Build.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.BuildOp.html create mode 
100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.ControlFlow.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Core.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.FFI.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.Raw.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.VarInt.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Nodes.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Output.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Session.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Tensor.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Types.html create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/highlight.js create mode 100644 docs/haddock/tensorflow-0.1.0.2/src/style.css rename docs/haddock/{tensorflow-0.1.0.0 => tensorflow-0.1.0.2}/synopsis.png (100%) create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/LICENSE delete mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow.GenOps.Core.html create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/src/highlight.js create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt create mode 100644 docs/haddock/tensorflow-logging-0.1.0.0/LICENSE delete mode 100644 docs/haddock/tensorflow-logging-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-logging-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-logging-0.1.0.0/src/TensorFlow.Logging.html create mode 100644 docs/haddock/tensorflow-logging-0.1.0.0/src/highlight.js create mode 100644 
docs/haddock/tensorflow-logging-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-logging-0.1.0.0/tensorflow-logging.txt create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/LICENSE delete mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow.Examples.MNIST.Parse.html create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow.Examples.MNIST.TrainedGraph.html create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/highlight.js create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/tensorflow-mnist.txt create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/LICENSE delete mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow.Examples.MNIST.InputData.html create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/highlight.js create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/tensorflow-mnist-input-data.txt delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/TensorFlow-NN.html delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/doc-index.html delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/haddock-util.js delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/hslogo-16.png delete mode 100644 
docs/haddock/tensorflow-nn-0.1.0.0/index-frames.html delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/index.html delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/minus.gif delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/ocean.css delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/plus.gif delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/synopsis.png delete mode 100644 docs/haddock/tensorflow-nn-0.1.0.0/tensorflow-nn.txt create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/LICENSE delete mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow.OpGen.ParsedOp.html create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow.OpGen.html create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/src/highlight.js create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/LICENSE create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Minimize.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-NN.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Queue.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Variable.html delete mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Minimize.html rename docs/haddock/{tensorflow-nn-0.1.0.0 => tensorflow-ops-0.1.0.0}/mini_TensorFlow-NN.html (79%) rename docs/haddock/{tensorflow-queue-0.1.0.0 => tensorflow-ops-0.1.0.0}/mini_TensorFlow-Queue.html (84%) create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Variable.html create mode 100644 
docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.EmbeddingOps.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Gradient.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Minimize.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.NN.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Ops.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Queue.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Variable.html create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/highlight.js create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/LICENSE create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-Example.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-ExampleParserConfiguration.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-Feature.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AllocationDescription.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-CostGraph.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-DeviceAttributes.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Function.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-KernelDef.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-LogMemory.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-StepStats.html create mode 100644 
docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorDescription.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorSlice.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Variable.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Versions.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Lib-Core-ErrorCodes.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Cluster.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-ControlFlow.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Debug.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-MetaGraph.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-NamedTensor.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-QueueRunner.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-RewriterConfig.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-SavedModel.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Saver.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-TensorBundle.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-TensorflowServer.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-MemmappedFileSystem.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-SavedTensorSlice.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-TestLog.html create mode 100644 
docs/haddock/tensorflow-proto-0.1.0.0/doc-index-J.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-Q.html delete mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-Example.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-ExampleParserConfiguration.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-Feature.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AllocationDescription.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-CostGraph.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-DeviceAttributes.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Function.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-KernelDef.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-LogMemory.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-StepStats.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorDescription.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorSlice.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Variable.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Versions.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Lib-Core-ErrorCodes.html create mode 100644 
docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Cluster.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-ControlFlow.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Debug.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-MetaGraph.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-NamedTensor.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-QueueRunner.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-RewriterConfig.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-SavedModel.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Saver.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-TensorBundle.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-TensorflowServer.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-MemmappedFileSystem.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-SavedTensorSlice.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-TestLog.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.Example.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.ExampleParserConfiguration.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.Feature.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.AllocationDescription.html create mode 100644 
docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.AttrValue.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.CostGraph.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.DeviceAttributes.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Function.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Graph.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.KernelDef.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.LogMemory.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.NodeDef.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.OpDef.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.ResourceHandle.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.StepStats.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Summary.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Tensor.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorDescription.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorShape.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorSlice.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Types.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Variable.html create mode 100644 
docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Versions.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Lib.Core.ErrorCodes.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Cluster.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Config.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.ControlFlow.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Debug.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.MetaGraph.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.NamedTensor.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.QueueRunner.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.RewriterConfig.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.SavedModel.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Saver.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.TensorBundle.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.TensorflowServer.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.Event.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.MemmappedFileSystem.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.SavedTensorSlice.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.TestLog.html create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/highlight.js create mode 
100644 docs/haddock/tensorflow-proto-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/hslogo-16.png delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/index-frames.html delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/index.html delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/minus.gif delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/plus.gif delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/synopsis.png delete mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt create mode 100644 docs/haddock/tensorflow-records-0.1.0.0/LICENSE delete mode 100644 docs/haddock/tensorflow-records-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-records-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-records-0.1.0.0/src/TensorFlow.CRC32C.html create mode 100644 docs/haddock/tensorflow-records-0.1.0.0/src/TensorFlow.Records.html create mode 100644 docs/haddock/tensorflow-records-0.1.0.0/src/highlight.js create mode 100644 docs/haddock/tensorflow-records-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-records-0.1.0.0/tensorflow-records.txt create mode 100644 docs/haddock/tensorflow-records-conduit-0.1.0.0/LICENSE delete mode 100644 docs/haddock/tensorflow-records-conduit-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-records-conduit-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-records-conduit-0.1.0.0/src/TensorFlow.Records.Conduit.html create mode 100644 docs/haddock/tensorflow-records-conduit-0.1.0.0/src/highlight.js create mode 100644 docs/haddock/tensorflow-records-conduit-0.1.0.0/src/style.css delete mode 100644 
docs/haddock/tensorflow-records-conduit-0.1.0.0/tensorflow-records-conduit.txt create mode 100644 docs/haddock/tensorflow-test-0.1.0.0/LICENSE delete mode 100644 docs/haddock/tensorflow-test-0.1.0.0/frames.html delete mode 100644 docs/haddock/tensorflow-test-0.1.0.0/index-frames.html create mode 100644 docs/haddock/tensorflow-test-0.1.0.0/src/TensorFlow.Test.html create mode 100644 docs/haddock/tensorflow-test-0.1.0.0/src/highlight.js create mode 100644 docs/haddock/tensorflow-test-0.1.0.0/src/style.css delete mode 100644 docs/haddock/tensorflow-test-0.1.0.0/tensorflow-test.txt diff --git a/docs/haddock/doc-index-124.html b/docs/haddock/doc-index-124.html deleted file mode 100644 index f837e6d..0000000 --- a/docs/haddock/doc-index-124.html +++ /dev/null @@ -1,4 +0,0 @@ - (Index - |)

 

Index - |

|:|TensorFlow.Types
\ No newline at end of file diff --git a/docs/haddock/doc-index-47.html b/docs/haddock/doc-index-47.html index 58ca838..7c67cad 100644 --- a/docs/haddock/doc-index-47.html +++ b/docs/haddock/doc-index-47.html @@ -1,4 +1,4 @@ - (Index - /)

 

\ No newline at end of file +

 

\ No newline at end of file diff --git a/docs/haddock/doc-index-58.html b/docs/haddock/doc-index-58.html index 00e243b..adc02a4 100644 --- a/docs/haddock/doc-index-58.html +++ b/docs/haddock/doc-index-58.html @@ -1,4 +1,4 @@ - (Index - :)

 

Index - :

:/TensorFlow.Types
\ No newline at end of file +

 

Index - :

:/TensorFlow.Types
\ No newline at end of file diff --git a/docs/haddock/doc-index-92.html b/docs/haddock/doc-index-92.html index 249213d..a9f7980 100644 --- a/docs/haddock/doc-index-92.html +++ b/docs/haddock/doc-index-92.html @@ -1,4 +1,4 @@ - (Index - \)

 

Index - \

\\TensorFlow.Types
\ No newline at end of file +

 

Index - \

\\TensorFlow.Types
\ No newline at end of file diff --git a/docs/haddock/doc-index-95.html b/docs/haddock/doc-index-95.html index 3ec6280..9469581 100644 --- a/docs/haddock/doc-index-95.html +++ b/docs/haddock/doc-index-95.html @@ -1,4 +1,4 @@ - (Index - _)

 

Index - _

_ArgTensorFlow.GenOps.Core
_Arg'TensorFlow.GenOps.Core
_ArrayToListTensorFlow.GenOps.Core
_ArrayToList'TensorFlow.GenOps.Core
_AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_Event'fileVersionProto.Tensorflow.Core.Util.Event
_Event'graphDefProto.Tensorflow.Core.Util.Event
_Event'logMessageProto.Tensorflow.Core.Util.Event
_Event'metaGraphDefProto.Tensorflow.Core.Util.Event
_Event'sessionLogProto.Tensorflow.Core.Util.Event
_Event'stepProto.Tensorflow.Core.Util.Event
_Event'summaryProto.Tensorflow.Core.Util.Event
_Event'taggedRunMetadataProto.Tensorflow.Core.Util.Event
_Event'wallTimeProto.Tensorflow.Core.Util.Event
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
_HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
_HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
_HistogramProto'minProto.Tensorflow.Core.Framework.Summary
_HistogramProto'numProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
_HostCastTensorFlow.GenOps.Core
_HostCast'TensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostRecv'TensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_HostSend'TensorFlow.GenOps.Core
_ListToArrayTensorFlow.GenOps.Core
_ListToArray'TensorFlow.GenOps.Core
_LogMessage'levelProto.Tensorflow.Core.Util.Event
_LogMessage'messageProto.Tensorflow.Core.Util.Event
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_ParallelConcatStartTensorFlow.GenOps.Core
_ParallelConcatStart'TensorFlow.GenOps.Core
_ParallelConcatUpdateTensorFlow.GenOps.Core
_ParallelConcatUpdate'TensorFlow.GenOps.Core
_RecvTensorFlow.GenOps.Core
_Recv'TensorFlow.GenOps.Core
_ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_Retval'TensorFlow.GenOps.Core
_RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SendTensorFlow.GenOps.Core
_Send'TensorFlow.GenOps.Core
_SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
_SessionLog'msgProto.Tensorflow.Core.Util.Event
_SessionLog'statusProto.Tensorflow.Core.Util.Event
_Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
_Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
_Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
_Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
_Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
_Summary'valueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'audioProto.Tensorflow.Core.Framework.Summary
_Summary'Value'histoProto.Tensorflow.Core.Framework.Summary
_Summary'Value'imageProto.Tensorflow.Core.Framework.Summary
_Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
_Summary'Value'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
_Summary'Value'simpleValueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tensorProto.Tensorflow.Core.Framework.Summary
_SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
_TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
_TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file +

 

Index - _

_AllocationDescription'allocatedBytesProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'allocationIdProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'allocatorNameProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'hasSingleReferenceProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'ptrProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'requestedBytesProto.Tensorflow.Core.Framework.AllocationDescription
_AllocatorMemoryUsed'allocatorBytesInUseProto.Tensorflow.Core.Framework.StepStats
_AllocatorMemoryUsed'allocatorNameProto.Tensorflow.Core.Framework.StepStats
_AllocatorMemoryUsed'liveBytesProto.Tensorflow.Core.Framework.StepStats
_AllocatorMemoryUsed'peakBytesProto.Tensorflow.Core.Framework.StepStats
_AllocatorMemoryUsed'totalBytesProto.Tensorflow.Core.Framework.StepStats
_ArgTensorFlow.GenOps.Core
_Arg'TensorFlow.GenOps.Core
_ArrayToListTensorFlow.GenOps.Core
_ArrayToList'TensorFlow.GenOps.Core
_AssetFileDef'filenameProto.Tensorflow.Core.Protobuf.MetaGraph
_AssetFileDef'tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'valueProto.Tensorflow.Core.Framework.AttrValue
_AutoParallelOptions'enableProto.Tensorflow.Core.Protobuf.RewriterConfig
_AutoParallelOptions'numReplicasProto.Tensorflow.Core.Protobuf.RewriterConfig
_AvailableDeviceInfo'memoryLimitProto.Tensorflow.Core.Util.TestLog
_AvailableDeviceInfo'nameProto.Tensorflow.Core.Util.TestLog
_AvailableDeviceInfo'physicalDescriptionProto.Tensorflow.Core.Util.TestLog
_AvailableDeviceInfo'type'Proto.Tensorflow.Core.Util.TestLog
_BenchmarkEntries'entryProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'cpuTimeProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'extrasProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'ExtrasEntry'keyProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'ExtrasEntry'valueProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'itersProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'nameProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'throughputProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'wallTimeProto.Tensorflow.Core.Util.TestLog
_BuildConfiguration'ccFlagsProto.Tensorflow.Core.Util.TestLog
_BuildConfiguration'modeProto.Tensorflow.Core.Util.TestLog
_BuildConfiguration'optsProto.Tensorflow.Core.Util.TestLog
_BundleEntryProto'crc32cProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'dtypeProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'offsetProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'shapeProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'shardIdProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'sizeProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'slicesProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleHeaderProto'endiannessProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleHeaderProto'numShardsProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleHeaderProto'versionProto.Tensorflow.Core.Protobuf.TensorBundle
_BytesList'valueProto.Tensorflow.Core.Example.Feature
_ClusterDef'jobProto.Tensorflow.Core.Protobuf.Cluster
_CollectionDef'AnyList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'BytesList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'FloatList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'Int64List'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'kindProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'NodeList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CommitId'kindProto.Tensorflow.Core.Util.TestLog
_CommitId'snapshotProto.Tensorflow.Core.Util.TestLog
_CondContextDef'branchProto.Tensorflow.Core.Protobuf.ControlFlow
_CondContextDef'contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
_CondContextDef'pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
_CondContextDef'predNameProto.Tensorflow.Core.Protobuf.ControlFlow
_CondContextDef'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'clusterDefProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_CostGraphDef'nodeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'computeCostProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'computeTimeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'controlInputProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'deviceProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'devicePersistentMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'deviceTempMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'hostPersistentMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'hostTempMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'idProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'inputInfoProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'InputInfo'precedingNodeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'InputInfo'precedingPortProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'isFinalProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'memoryTimeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'nameProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'outputInfoProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'OutputInfo'aliasInputPortProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'OutputInfo'dtypeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'OutputInfo'shapeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'OutputInfo'sizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'temporaryMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CPUInfo'cacheSizeProto.Tensorflow.Core.Util.TestLog
_CPUInfo'CacheSizeEntry'keyProto.Tensorflow.Core.Util.TestLog
_CPUInfo'CacheSizeEntry'valueProto.Tensorflow.Core.Util.TestLog
_CPUInfo'cpuGovernorProto.Tensorflow.Core.Util.TestLog
_CPUInfo'cpuInfoProto.Tensorflow.Core.Util.TestLog
_CPUInfo'mhzPerCpuProto.Tensorflow.Core.Util.TestLog
_CPUInfo'numCoresProto.Tensorflow.Core.Util.TestLog
_CPUInfo'numCoresAllowedProto.Tensorflow.Core.Util.TestLog
_DebugOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Debug
_DebugOptions'globalStepProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'tolerateDebugOpCreationFailuresProto.Tensorflow.Core.Protobuf.Debug
_DeviceAttributes'deviceTypeProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'incarnationProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'localityProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'memoryLimitProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'nameProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'physicalDeviceDescProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceLocality'busIdProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceStepStats'deviceProto.Tensorflow.Core.Framework.StepStats
_DeviceStepStats'nodeStatsProto.Tensorflow.Core.Framework.StepStats
_EntryValue'kindProto.Tensorflow.Core.Util.TestLog
_Event'stepProto.Tensorflow.Core.Util.Event
_Event'wallTimeProto.Tensorflow.Core.Util.Event
_Event'whatProto.Tensorflow.Core.Util.Event
_Example'featuresProto.Tensorflow.Core.Example.Example
_ExampleParserConfiguration'featureMapProto.Tensorflow.Core.Example.ExampleParserConfiguration
_ExampleParserConfiguration'FeatureMapEntry'keyProto.Tensorflow.Core.Example.ExampleParserConfiguration
_ExampleParserConfiguration'FeatureMapEntry'valueProto.Tensorflow.Core.Example.ExampleParserConfiguration
_Feature'kindProto.Tensorflow.Core.Example.Feature
_FeatureConfiguration'configProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FeatureList'featureProto.Tensorflow.Core.Example.Feature
_FeatureLists'featureListProto.Tensorflow.Core.Example.Feature
_FeatureLists'FeatureListEntry'keyProto.Tensorflow.Core.Example.Feature
_FeatureLists'FeatureListEntry'valueProto.Tensorflow.Core.Example.Feature
_Features'featureProto.Tensorflow.Core.Example.Feature
_Features'FeatureEntry'keyProto.Tensorflow.Core.Example.Feature
_Features'FeatureEntry'valueProto.Tensorflow.Core.Example.Feature
_FixedLenFeatureProto'defaultValueProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FixedLenFeatureProto'dtypeProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FixedLenFeatureProto'shapeProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FixedLenFeatureProto'valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FloatList'valueProto.Tensorflow.Core.Example.Feature
_FunctionDef'attrProto.Tensorflow.Core.Framework.Function
_FunctionDef'AttrEntry'keyProto.Tensorflow.Core.Framework.Function
_FunctionDef'AttrEntry'valueProto.Tensorflow.Core.Framework.Function
_FunctionDef'nodeDefProto.Tensorflow.Core.Framework.Function
_FunctionDef'retProto.Tensorflow.Core.Framework.Function
_FunctionDef'RetEntry'keyProto.Tensorflow.Core.Framework.Function
_FunctionDef'RetEntry'valueProto.Tensorflow.Core.Framework.Function
_FunctionDef'signatureProto.Tensorflow.Core.Framework.Function
_FunctionDefLibrary'functionProto.Tensorflow.Core.Framework.Function
_FunctionDefLibrary'gradientProto.Tensorflow.Core.Framework.Function
_GPUInfo'busIdProto.Tensorflow.Core.Util.TestLog
_GPUInfo'modelProto.Tensorflow.Core.Util.TestLog
_GPUInfo'uuidProto.Tensorflow.Core.Util.TestLog
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'forceGpuCompatibleProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'pollingActiveDelayUsecsProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'pollingInactiveDelayMsecsProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GradientDef'functionNameProto.Tensorflow.Core.Framework.Function
_GradientDef'gradientFuncProto.Tensorflow.Core.Framework.Function
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
_HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
_HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
_HistogramProto'minProto.Tensorflow.Core.Framework.Summary
_HistogramProto'numProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
_HostCastTensorFlow.GenOps.Core
_HostCast'TensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostRecv'TensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_HostSend'TensorFlow.GenOps.Core
_Int64List'valueProto.Tensorflow.Core.Example.Feature
_JobDef'nameProto.Tensorflow.Core.Protobuf.Cluster
_JobDef'tasksProto.Tensorflow.Core.Protobuf.Cluster
_JobDef'TasksEntry'keyProto.Tensorflow.Core.Protobuf.Cluster
_JobDef'TasksEntry'valueProto.Tensorflow.Core.Protobuf.Cluster
_KernelDef'AttrConstraint'allowedValuesProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'AttrConstraint'nameProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'constraintProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'deviceTypeProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'hostMemoryArgProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'labelProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'opProto.Tensorflow.Core.Framework.KernelDef
_ListToArrayTensorFlow.GenOps.Core
_ListToArray'TensorFlow.GenOps.Core
_LogMessage'levelProto.Tensorflow.Core.Util.Event
_LogMessage'messageProto.Tensorflow.Core.Util.Event
_MachineConfiguration'availableDeviceInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'cpuInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'deviceInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'hostnameProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'memoryInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'platformInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'serialIdentifierProto.Tensorflow.Core.Util.TestLog
_MemmappedFileSystemDirectory'elementProto.Tensorflow.Core.Util.MemmappedFileSystem
_MemmappedFileSystemDirectoryElement'nameProto.Tensorflow.Core.Util.MemmappedFileSystem
_MemmappedFileSystemDirectoryElement'offsetProto.Tensorflow.Core.Util.MemmappedFileSystem
_MemoryInfo'availableProto.Tensorflow.Core.Util.TestLog
_MemoryInfo'totalProto.Tensorflow.Core.Util.TestLog
_MemoryLogRawAllocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'numBytesProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'operationProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'ptrProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'deferredProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'operationProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogStep'handleProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogStep'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorAllocation'kernelNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorAllocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorAllocation'tensorProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorDeallocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorDeallocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorOutput'indexProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorOutput'kernelNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorOutput'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorOutput'tensorProto.Tensorflow.Core.Framework.LogMemory
_MemoryStats'devicePersistentMemorySizeProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'devicePersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'deviceTempMemorySizeProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'hostPersistentMemorySizeProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'hostPersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'hostTempMemorySizeProto.Tensorflow.Core.Framework.StepStats
_MetaGraphDef'assetFileDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'collectionDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'CollectionDefEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'CollectionDefEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'graphDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'metaGraphVersionProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'tagsProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'tensorflowGitVersionProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'tensorflowVersionProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'signatureDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'SignatureDefEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'SignatureDefEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NamedTensorProto'nameProto.Tensorflow.Core.Protobuf.NamedTensor
_NamedTensorProto'tensorProto.Tensorflow.Core.Protobuf.NamedTensor
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_NodeExecStats'allEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'allStartMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'memoryProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'memoryStatsProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'nodeNameProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'opEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'opStartRelMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'outputProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'referencedTensorProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'scheduledMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'threadIdProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'timelineLabelProto.Tensorflow.Core.Framework.StepStats
_NodeOutput'slotProto.Tensorflow.Core.Framework.StepStats
_NodeOutput'tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_ParallelConcatStartTensorFlow.GenOps.Core
_ParallelConcatStart'TensorFlow.GenOps.Core
_ParallelConcatUpdateTensorFlow.GenOps.Core
_ParallelConcatUpdate'TensorFlow.GenOps.Core
_PlatformInfo'bitsProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'linkageProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'machineProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'releaseProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'systemProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'versionProto.Tensorflow.Core.Util.TestLog
_QueueRunnerDef'cancelOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
_QueueRunnerDef'closeOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
_QueueRunnerDef'enqueueOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
_QueueRunnerDef'queueClosedExceptionTypesProto.Tensorflow.Core.Protobuf.QueueRunner
_QueueRunnerDef'queueNameProto.Tensorflow.Core.Protobuf.QueueRunner
_RecvTensorFlow.GenOps.Core
_Recv'TensorFlow.GenOps.Core
_ResourceHandleProto'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandleProto'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandleProto'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandleProto'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandleProto'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_Retval'TensorFlow.GenOps.Core
_RewriterConfig'autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'constantFoldingProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'disableModelPruningProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'memoryOptimizationProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'optimizersProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'optimizeTensorLayoutProto.Tensorflow.Core.Protobuf.RewriterConfig
_RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
_RunConfiguration'argumentProto.Tensorflow.Core.Util.TestLog
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SavedModel'metaGraphsProto.Tensorflow.Core.Protobuf.SavedModel
_SavedModel'savedModelSchemaVersionProto.Tensorflow.Core.Protobuf.SavedModel
_SavedSlice'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSlice'nameProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSlice'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSliceMeta'nameProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSliceMeta'shapeProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSliceMeta'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSliceMeta'type'Proto.Tensorflow.Core.Util.SavedTensorSlice
_SavedTensorSliceMeta'tensorProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedTensorSliceMeta'versionsProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedTensorSlices'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
_SavedTensorSlices'metaProto.Tensorflow.Core.Util.SavedTensorSlice
_SaverDef'filenameTensorNameProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'keepCheckpointEveryNHoursProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'maxToKeepProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'restoreOpNameProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'saveTensorNameProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'shardedProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'versionProto.Tensorflow.Core.Protobuf.Saver
_SaveSliceInfoDef'fullNameProto.Tensorflow.Core.Framework.Variable
_SaveSliceInfoDef'fullShapeProto.Tensorflow.Core.Framework.Variable
_SaveSliceInfoDef'varOffsetProto.Tensorflow.Core.Framework.Variable
_SaveSliceInfoDef'varShapeProto.Tensorflow.Core.Framework.Variable
_SendTensorFlow.GenOps.Core
_Send'TensorFlow.GenOps.Core
_SequenceExample'contextProto.Tensorflow.Core.Example.Example
_SequenceExample'featureListsProto.Tensorflow.Core.Example.Example
_ServerDef'clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
_ServerDef'defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
_ServerDef'jobNameProto.Tensorflow.Core.Protobuf.TensorflowServer
_ServerDef'protocolProto.Tensorflow.Core.Protobuf.TensorflowServer
_ServerDef'taskIndexProto.Tensorflow.Core.Protobuf.TensorflowServer
_SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
_SessionLog'msgProto.Tensorflow.Core.Util.Event
_SessionLog'statusProto.Tensorflow.Core.Util.Event
_SignatureDef'inputsProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'InputsEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'InputsEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'methodNameProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'outputsProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'OutputsEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'OutputsEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_StepStats'devStatsProto.Tensorflow.Core.Framework.StepStats
_Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
_Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
_Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
_Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
_Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
_Summary'valueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'metadataProto.Tensorflow.Core.Framework.Summary
_Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
_Summary'Value'valueProto.Tensorflow.Core.Framework.Summary
_SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'displayNameProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'pluginDataProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'PluginData'contentProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'PluginData'pluginNameProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'summaryDescriptionProto.Tensorflow.Core.Framework.Summary
_TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
_TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
_TensorDescription'allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
_TensorDescription'dtypeProto.Tensorflow.Core.Framework.TensorDescription
_TensorDescription'shapeProto.Tensorflow.Core.Framework.TensorDescription
_TensorInfo'CooSparse'denseShapeTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'CooSparse'indicesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'CooSparse'valuesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'dtypeProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'encodingProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'tensorShapeProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_TensorSliceProto'extentProto.Tensorflow.Core.Framework.TensorSlice
_TensorSliceProto'Extent'hasLengthProto.Tensorflow.Core.Framework.TensorSlice
_TensorSliceProto'Extent'startProto.Tensorflow.Core.Framework.TensorSlice
_TestResults'benchmarkTypeProto.Tensorflow.Core.Util.TestLog
_TestResults'buildConfigurationProto.Tensorflow.Core.Util.TestLog
_TestResults'commitIdProto.Tensorflow.Core.Util.TestLog
_TestResults'entriesProto.Tensorflow.Core.Util.TestLog
_TestResults'machineConfigurationProto.Tensorflow.Core.Util.TestLog
_TestResults'nameProto.Tensorflow.Core.Util.TestLog
_TestResults'runConfigurationProto.Tensorflow.Core.Util.TestLog
_TestResults'runModeProto.Tensorflow.Core.Util.TestLog
_TestResults'runTimeProto.Tensorflow.Core.Util.TestLog
_TestResults'startTimeProto.Tensorflow.Core.Util.TestLog
_TestResults'targetProto.Tensorflow.Core.Util.TestLog
_ThreadPoolOptionProto'globalNameProto.Tensorflow.Core.Protobuf.Config
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
_UnsafeReadVariableTensorFlow.GenOps.Core
_UnsafeReadVariable'TensorFlow.GenOps.Core
_ValuesDef'externalValuesProto.Tensorflow.Core.Protobuf.ControlFlow
_ValuesDef'ExternalValuesEntry'keyProto.Tensorflow.Core.Protobuf.ControlFlow
_ValuesDef'ExternalValuesEntry'valueProto.Tensorflow.Core.Protobuf.ControlFlow
_ValuesDef'valuesProto.Tensorflow.Core.Protobuf.ControlFlow
_VariableDef'initializerNameProto.Tensorflow.Core.Framework.Variable
_VariableDef'isResourceProto.Tensorflow.Core.Framework.Variable
_VariableDef'saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
_VariableDef'snapshotNameProto.Tensorflow.Core.Framework.Variable
_VariableDef'variableNameProto.Tensorflow.Core.Framework.Variable
_VarLenFeatureProto'dtypeProto.Tensorflow.Core.Example.ExampleParserConfiguration
_VarLenFeatureProto'indicesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
_VarLenFeatureProto'shapesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
_VarLenFeatureProto'valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
_VersionDef'badConsumersProto.Tensorflow.Core.Framework.Versions
_VersionDef'minConsumerProto.Tensorflow.Core.Framework.Versions
_VersionDef'producerProto.Tensorflow.Core.Framework.Versions
_WhileContextDef'backPropProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'loopEnterNamesProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'loopExitNamesProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'parallelIterationsProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'pivotForBodyNameProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'pivotForPredNameProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'swapMemoryProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
\ No newline at end of file diff --git a/docs/haddock/doc-index-A.html b/docs/haddock/doc-index-A.html index c983946..4b1ac0d 100644 --- a/docs/haddock/doc-index-A.html +++ b/docs/haddock/doc-index-A.html @@ -1,4 +1,4 @@ - (Index - A)

 

Index - A

abortTensorFlow.GenOps.Core
abort'TensorFlow.GenOps.Core
abs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
abs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorApplyGradient'TensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorNumAccumulated'TensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorSetGlobalStep'TensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
accumulatorTakeGradient'TensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
acos'TensorFlow.GenOps.Core
add 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
add' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addManySparseToTensorsMapTensorFlow.GenOps.Core
addManySparseToTensorsMap'TensorFlow.GenOps.Core
addN 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addN' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addNewOpTensorFlow.Build
addSparseToTensorsMapTensorFlow.GenOps.Core
addSparseToTensorsMap'TensorFlow.GenOps.Core
addSummaryTensorFlow.Tensor
adjustContrastTensorFlow.GenOps.Core
adjustContrast'TensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustContrastv2'TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
adjustHue'TensorFlow.GenOps.Core
adjustSaturationTensorFlow.GenOps.Core
adjustSaturation'TensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
all'TensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allCandidateSampler'TensorFlow.GenOps.Core
allocatorTypeProto.Tensorflow.Core.Protobuf.Config
allowedValuesProto.Tensorflow.Core.Framework.OpDef
allowGrowthProto.Tensorflow.Core.Protobuf.Config
allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
AllTensorTypesTensorFlow.Types
anyTensorFlow.GenOps.Core
any'TensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdadelta'TensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagrad'TensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdagradDA'TensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyAdam'TensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyCenteredRMSProp'TensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyFtrl'TensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyGradientDescent'TensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyMomentum'TensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalAdagrad'TensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyProximalGradientDescent'TensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
applyRMSProp'TensorFlow.GenOps.Core
argCaseKindTensorFlow.OpGen.ParsedOp
ArgKindTensorFlow.OpGen.ParsedOp
argKindTensorFlow.OpGen.ParsedOp
argLengthTensorFlow.OpGen.ParsedOp
argMax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMinTensorFlow.GenOps.Core
argMin'TensorFlow.GenOps.Core
ArgSomeTensorTensorFlow.OpGen.ParsedOp
ArgTensorBuildTensorFlow.OpGen.ParsedOp
ArgTensorRefTensorFlow.OpGen.ParsedOp
ArgTensorValueTensorFlow.OpGen.ParsedOp
ArgTypeTensorFlow.OpGen.ParsedOp
argTypeTensorFlow.OpGen.ParsedOp
ArgTypeAttrTensorFlow.OpGen.ParsedOp
argTypeAttrTensorFlow.OpGen.ParsedOp
ArgTypeFixedTensorFlow.OpGen.ParsedOp
asGraphDefTensorFlow.Build, TensorFlow.Core
asinTensorFlow.GenOps.Core
asin'TensorFlow.GenOps.Core
assertTensorFlow.GenOps.Core
assert'TensorFlow.GenOps.Core
assertAllCloseTensorFlow.Test
assign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assignAddTensorFlow.GenOps.Core
assignAdd'TensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignAddVariableOp'TensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignSub'TensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
assignVariableOp'TensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asString'TensorFlow.GenOps.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
atanTensorFlow.GenOps.Core
atan'TensorFlow.GenOps.Core
Attr 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
attr 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
AttrBaseTypeTensorFlow.OpGen.ParsedOp
AttrBoolTensorFlow.OpGen.ParsedOp
AttrBytesTensorFlow.OpGen.ParsedOp
attrDescriptionTensorFlow.OpGen.ParsedOp
AttrFloatTensorFlow.OpGen.ParsedOp
AttributeTensorFlow.Types
attrInfoTensorFlow.OpGen.ParsedOp
AttrInt64TensorFlow.OpGen.ParsedOp
attrLensTensorFlow.Types
AttrListTensorFlow.OpGen.ParsedOp
attrNameTensorFlow.OpGen.ParsedOp
AttrShapeTensorFlow.OpGen.ParsedOp
AttrSingleTensorFlow.OpGen.ParsedOp
AttrTensorTensorFlow.OpGen.ParsedOp
AttrType 
1 (Type/Class)TensorFlow.OpGen.ParsedOp
2 (Data Constructor)TensorFlow.OpGen.ParsedOp
AttrValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
audioProto.Tensorflow.Core.Framework.Summary
audioSummaryTensorFlow.GenOps.Core
audioSummary'TensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
audioSummaryV2'TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool'TensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3D'TensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPool3DGrad'TensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
avgPoolGrad'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - A

abortTensorFlow.GenOps.Core
abort'TensorFlow.GenOps.Core
ABORTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
abs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
abs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorApplyGradient'TensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorNumAccumulated'TensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorSetGlobalStep'TensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
accumulatorTakeGradient'TensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
acos'TensorFlow.GenOps.Core
acoshTensorFlow.GenOps.Core
acosh'TensorFlow.GenOps.Core
adamTensorFlow.Minimize
adam'TensorFlow.Minimize
adamBeta1TensorFlow.Minimize
adamBeta2TensorFlow.Minimize
AdamConfig 
1 (Data Constructor)TensorFlow.Minimize
2 (Type/Class)TensorFlow.Minimize
adamEpsilonTensorFlow.Minimize
adamLearningRateTensorFlow.Minimize
add 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
add' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build, TensorFlow.Core
addManySparseToTensorsMapTensorFlow.GenOps.Core
addManySparseToTensorsMap'TensorFlow.GenOps.Core
addN 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addN' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addNewOpTensorFlow.Build
addSparseToTensorsMapTensorFlow.GenOps.Core
addSparseToTensorsMap'TensorFlow.GenOps.Core
addSummaryTensorFlow.Tensor
adjustContrastTensorFlow.GenOps.Core
adjustContrast'TensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustContrastv2'TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
adjustHue'TensorFlow.GenOps.Core
adjustSaturationTensorFlow.GenOps.Core
adjustSaturation'TensorFlow.GenOps.Core
aliasInputPortProto.Tensorflow.Core.Framework.CostGraph
allTensorFlow.GenOps.Core
all'TensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allCandidateSampler'TensorFlow.GenOps.Core
allEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
allocatedBytesProto.Tensorflow.Core.Framework.AllocationDescription
AllocationDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AllocationDescription
2 (Type/Class)Proto.Tensorflow.Core.Framework.AllocationDescription
allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
allocationId 
1 (Function)Proto.Tensorflow.Core.Framework.LogMemory
2 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
allocatorBytesInUseProto.Tensorflow.Core.Framework.StepStats
AllocatorMemoryUsed 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
allocatorName 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.LogMemory
3 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
allocatorTypeProto.Tensorflow.Core.Protobuf.Config
allowedValues 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.KernelDef
allowGrowthProto.Tensorflow.Core.Protobuf.Config
allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
allStartMicrosProto.Tensorflow.Core.Framework.StepStats
AllTensorTypesTensorFlow.Types
ALREADY_EXISTSProto.Tensorflow.Core.Lib.Core.ErrorCodes
anyTensorFlow.GenOps.Core
any'TensorFlow.GenOps.Core
anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
anyListProto.Tensorflow.Core.Protobuf.MetaGraph
applyAdadeltaTensorFlow.GenOps.Core
applyAdadelta'TensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagrad'TensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdagradDA'TensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyAdam'TensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyCenteredRMSProp'TensorFlow.GenOps.Core
applyDelayCompensatedGradientDescentTensorFlow.GenOps.Core
applyDelayCompensatedGradientDescent'TensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyFtrl'TensorFlow.GenOps.Core
applyFtrlV2TensorFlow.GenOps.Core
applyFtrlV2'TensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyGradientDescent'TensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyMomentum'TensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalAdagrad'TensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyProximalGradientDescent'TensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
applyRMSProp'TensorFlow.GenOps.Core
approximateEqualTensorFlow.GenOps.Core
approximateEqual'TensorFlow.GenOps.Core
ArgKindTensorFlow.OpGen.ParsedOp
argKindTensorFlow.OpGen.ParsedOp
argLengthTensorFlow.OpGen.ParsedOp
argMax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMinTensorFlow.GenOps.Core
argMin'TensorFlow.GenOps.Core
ArgSomeTensorTensorFlow.OpGen.ParsedOp
ArgTensorBuildTensorFlow.OpGen.ParsedOp
ArgTensorRefTensorFlow.OpGen.ParsedOp
ArgTensorValueTensorFlow.OpGen.ParsedOp
ArgTypeTensorFlow.OpGen.ParsedOp
argTypeTensorFlow.OpGen.ParsedOp
ArgTypeAttrTensorFlow.OpGen.ParsedOp
argTypeAttrTensorFlow.OpGen.ParsedOp
ArgTypeFixedTensorFlow.OpGen.ParsedOp
argumentProto.Tensorflow.Core.Util.TestLog
asGraphDefTensorFlow.Build, TensorFlow.Core
asinTensorFlow.GenOps.Core
asin'TensorFlow.GenOps.Core
asinhTensorFlow.GenOps.Core
asinh'TensorFlow.GenOps.Core
assertTensorFlow.GenOps.Core
assert'TensorFlow.GenOps.Core
assertAllCloseTensorFlow.Test
AssetFileDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
assetFileDefProto.Tensorflow.Core.Protobuf.MetaGraph
assign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
3 (Function)TensorFlow.Ops
assign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
3 (Function)TensorFlow.Ops
assignAdd 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
assignAdd' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
assignAddVariableOpTensorFlow.GenOps.Core
assignAddVariableOp'TensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignSub'TensorFlow.GenOps.Core
assignSubVariableOpTensorFlow.GenOps.Core
assignSubVariableOp'TensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
assignVariableOp'TensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asString'TensorFlow.GenOps.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
atanTensorFlow.GenOps.Core
atan'TensorFlow.GenOps.Core
atan2TensorFlow.GenOps.Core
atan2'TensorFlow.GenOps.Core
atanhTensorFlow.GenOps.Core
atanh'TensorFlow.GenOps.Core
Attr 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
attr 
1 (Function)Proto.Tensorflow.Core.Framework.Function
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
AttrBaseTypeTensorFlow.OpGen.ParsedOp
AttrBoolTensorFlow.OpGen.ParsedOp
AttrBytesTensorFlow.OpGen.ParsedOp
attrDescriptionTensorFlow.OpGen.ParsedOp
AttrFloatTensorFlow.OpGen.ParsedOp
AttributeTensorFlow.Types
attrInfoTensorFlow.OpGen.ParsedOp
AttrInt64TensorFlow.OpGen.ParsedOp
attrLensTensorFlow.Types
AttrListTensorFlow.OpGen.ParsedOp
attrNameTensorFlow.OpGen.ParsedOp
AttrShapeTensorFlow.OpGen.ParsedOp
AttrSingleTensorFlow.OpGen.ParsedOp
AttrTensorTensorFlow.OpGen.ParsedOp
AttrType 
1 (Type/Class)TensorFlow.OpGen.ParsedOp
2 (Data Constructor)TensorFlow.OpGen.ParsedOp
AttrValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'BProto.Tensorflow.Core.Framework.AttrValue
AttrValue'FProto.Tensorflow.Core.Framework.AttrValue
AttrValue'FuncProto.Tensorflow.Core.Framework.AttrValue
AttrValue'IProto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListProto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'PlaceholderProto.Tensorflow.Core.Framework.AttrValue
AttrValue'SProto.Tensorflow.Core.Framework.AttrValue
AttrValue'ShapeProto.Tensorflow.Core.Framework.AttrValue
AttrValue'TensorProto.Tensorflow.Core.Framework.AttrValue
AttrValue'TypeProto.Tensorflow.Core.Framework.AttrValue
AttrValue'ValueProto.Tensorflow.Core.Framework.AttrValue
audioProto.Tensorflow.Core.Framework.Summary
audioSpectrogramTensorFlow.GenOps.Core
audioSpectrogram'TensorFlow.GenOps.Core
audioSummaryTensorFlow.GenOps.Core
audioSummary'TensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
audioSummaryV2'TensorFlow.GenOps.Core
autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
AutoParallelOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.RewriterConfig
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.RewriterConfig
availableProto.Tensorflow.Core.Util.TestLog
AvailableDeviceInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
availableDeviceInfoProto.Tensorflow.Core.Util.TestLog
avgPoolTensorFlow.GenOps.Core
avgPool'TensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3D'TensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPool3DGrad'TensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
avgPoolGrad'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-All.html b/docs/haddock/doc-index-All.html index b8a8c45..21d8b52 100644 --- a/docs/haddock/doc-index-All.html +++ b/docs/haddock/doc-index-All.html @@ -1,4 +1,4 @@ - (Index)

 

Index

/:/TensorFlow.Types
/=TensorFlow.Types, TensorFlow.Core
:/TensorFlow.Types
abortTensorFlow.GenOps.Core
abort'TensorFlow.GenOps.Core
abs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
abs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorApplyGradient'TensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorNumAccumulated'TensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorSetGlobalStep'TensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
accumulatorTakeGradient'TensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
acos'TensorFlow.GenOps.Core
add 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
add' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addManySparseToTensorsMapTensorFlow.GenOps.Core
addManySparseToTensorsMap'TensorFlow.GenOps.Core
addN 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addN' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addNewOpTensorFlow.Build
addSparseToTensorsMapTensorFlow.GenOps.Core
addSparseToTensorsMap'TensorFlow.GenOps.Core
addSummaryTensorFlow.Tensor
adjustContrastTensorFlow.GenOps.Core
adjustContrast'TensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustContrastv2'TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
adjustHue'TensorFlow.GenOps.Core
adjustSaturationTensorFlow.GenOps.Core
adjustSaturation'TensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
all'TensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allCandidateSampler'TensorFlow.GenOps.Core
allocatorTypeProto.Tensorflow.Core.Protobuf.Config
allowedValuesProto.Tensorflow.Core.Framework.OpDef
allowGrowthProto.Tensorflow.Core.Protobuf.Config
allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
AllTensorTypesTensorFlow.Types
anyTensorFlow.GenOps.Core
any'TensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdadelta'TensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagrad'TensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdagradDA'TensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyAdam'TensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyCenteredRMSProp'TensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyFtrl'TensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyGradientDescent'TensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyMomentum'TensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalAdagrad'TensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyProximalGradientDescent'TensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
applyRMSProp'TensorFlow.GenOps.Core
argCaseKindTensorFlow.OpGen.ParsedOp
ArgKindTensorFlow.OpGen.ParsedOp
argKindTensorFlow.OpGen.ParsedOp
argLengthTensorFlow.OpGen.ParsedOp
argMax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMinTensorFlow.GenOps.Core
argMin'TensorFlow.GenOps.Core
ArgSomeTensorTensorFlow.OpGen.ParsedOp
ArgTensorBuildTensorFlow.OpGen.ParsedOp
ArgTensorRefTensorFlow.OpGen.ParsedOp
ArgTensorValueTensorFlow.OpGen.ParsedOp
ArgTypeTensorFlow.OpGen.ParsedOp
argTypeTensorFlow.OpGen.ParsedOp
ArgTypeAttrTensorFlow.OpGen.ParsedOp
argTypeAttrTensorFlow.OpGen.ParsedOp
ArgTypeFixedTensorFlow.OpGen.ParsedOp
asGraphDefTensorFlow.Build, TensorFlow.Core
asinTensorFlow.GenOps.Core
asin'TensorFlow.GenOps.Core
assertTensorFlow.GenOps.Core
assert'TensorFlow.GenOps.Core
assertAllCloseTensorFlow.Test
assign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assignAddTensorFlow.GenOps.Core
assignAdd'TensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignAddVariableOp'TensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignSub'TensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
assignVariableOp'TensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asString'TensorFlow.GenOps.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
atanTensorFlow.GenOps.Core
atan'TensorFlow.GenOps.Core
Attr 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
attr 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
AttrBaseTypeTensorFlow.OpGen.ParsedOp
AttrBoolTensorFlow.OpGen.ParsedOp
AttrBytesTensorFlow.OpGen.ParsedOp
attrDescriptionTensorFlow.OpGen.ParsedOp
AttrFloatTensorFlow.OpGen.ParsedOp
AttributeTensorFlow.Types
attrInfoTensorFlow.OpGen.ParsedOp
AttrInt64TensorFlow.OpGen.ParsedOp
attrLensTensorFlow.Types
AttrListTensorFlow.OpGen.ParsedOp
attrNameTensorFlow.OpGen.ParsedOp
AttrShapeTensorFlow.OpGen.ParsedOp
AttrSingleTensorFlow.OpGen.ParsedOp
AttrTensorTensorFlow.OpGen.ParsedOp
AttrType 
1 (Type/Class)TensorFlow.OpGen.ParsedOp
2 (Data Constructor)TensorFlow.OpGen.ParsedOp
AttrValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
audioProto.Tensorflow.Core.Framework.Summary
audioSummaryTensorFlow.GenOps.Core
audioSummary'TensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
audioSummaryV2'TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool'TensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3D'TensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPool3DGrad'TensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
avgPoolGrad'TensorFlow.GenOps.Core
bProto.Tensorflow.Core.Framework.AttrValue
barrierTensorFlow.GenOps.Core
barrier'TensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierClose'TensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierIncompleteSize'TensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierInsertMany'TensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
barrierReadySize'TensorFlow.GenOps.Core
barrierTakeManyTensorFlow.GenOps.Core
barrierTakeMany'TensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholesky'TensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchCholeskyGrad'TensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT'TensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT2D'TensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchFFT3D'TensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT'TensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT2D'TensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchIFFT3D'TensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatMul'TensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixBandPart'TensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDeterminant'TensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiag'TensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixDiagPart'TensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixInverse'TensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSetDiag'TensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolve'TensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixSolveLs'TensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchMatrixTriangularSolve'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEig'TensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSelfAdjointEigV2'TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchSvd'TensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpace'TensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
batchToSpaceND'TensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
betainc'TensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAdd'TensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddGrad'TensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasAddV1'TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bitcastTensorFlow.GenOps.Core
bitcast'TensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
broadcastArgsTensorFlow.GenOps.Core
broadcastArgs'TensorFlow.GenOps.Core
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
broadcastGradientArgs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
bucketProto.Tensorflow.Core.Framework.Summary
bucketLimitProto.Tensorflow.Core.Framework.Summary
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
BuildInputsTensorFlow.BuildOp
buildInputsTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildResultTensorFlow.BuildOp
buildResultTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
camelCaseTensorFlow.OpGen.ParsedOp
cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
cast' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ceilTensorFlow.GenOps.Core
ceil'TensorFlow.GenOps.Core
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
checkNumerics'TensorFlow.GenOps.Core
checkpointPathProto.Tensorflow.Core.Util.Event
choleskyTensorFlow.GenOps.Core
cholesky'TensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
choleskyGrad'TensorFlow.GenOps.Core
collectAllSummariesTensorFlow.Tensor
colocateWithTensorFlow.Tensor, TensorFlow.Core
colorspaceProto.Tensorflow.Core.Framework.Summary
complexTensorFlow.GenOps.Core
complex'TensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
complexAbs'TensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
computeAccidentalHits'TensorFlow.GenOps.Core
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concat' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatOffsetTensorFlow.GenOps.Core
concatOffset'TensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
concatV2'TensorFlow.GenOps.Core
conditionalAccumulatorTensorFlow.GenOps.Core
conditionalAccumulator'TensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
conj'TensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
const'TensorFlow.GenOps.Core
constantTensorFlow.Ops
constant'TensorFlow.Ops
containerProto.Tensorflow.Core.Framework.ResourceHandle
contentTypeProto.Tensorflow.Core.Framework.Summary
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
controlTriggerTensorFlow.GenOps.Core
controlTrigger'TensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2D'TensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropFilter'TensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv2DBackpropInput'TensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3D'TensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilter'TensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropFilterV2'TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInput'TensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
conv3DBackpropInputV2'TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copy'TensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
copyHost'TensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
cos'TensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
countUpToTensorFlow.GenOps.Core
countUpTo'TensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResize'TensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradBoxes'TensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
cropAndResizeGradImage'TensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cross'TensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCBeamSearchDecoder'TensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCGreedyDecoder'TensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cTCLoss'TensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumprod'TensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
cumsum'TensorFlow.GenOps.Core
DataType 
1 (Type/Class)TensorFlow.Types
2 (Type/Class)Proto.Tensorflow.Core.Framework.Types
dcomplexValProto.Tensorflow.Core.Framework.Tensor
debugIdentityTensorFlow.GenOps.Core
debugIdentity'TensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugNanCount'TensorFlow.GenOps.Core
debugNumericSummaryTensorFlow.GenOps.Core
debugNumericSummary'TensorFlow.GenOps.Core
debugOptionsProto.Tensorflow.Core.Protobuf.Config
decodeBase64TensorFlow.GenOps.Core
decodeBase64'TensorFlow.GenOps.Core
decodeCSVTensorFlow.GenOps.Core
decodeCSV'TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeGif'TensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJpeg'TensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodeJSONExample'TensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodePng'TensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeRaw'TensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types, TensorFlow.Core
decodeTFRecordsTensorFlow.Records.Conduit
defaultValueProto.Tensorflow.Core.Framework.OpDef
deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
DeleteTensorFlow.Types
deleteSessionTensorTensorFlow.GenOps.Core
deleteSessionTensor'TensorFlow.GenOps.Core
denseToDenseSetOperationTensorFlow.GenOps.Core
denseToDenseSetOperation'TensorFlow.GenOps.Core
denseToSparseSetOperationTensorFlow.GenOps.Core
denseToSparseSetOperation'TensorFlow.GenOps.Core
deprecationProto.Tensorflow.Core.Framework.OpDef
depthToSpaceTensorFlow.GenOps.Core
depthToSpace'TensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNative'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequantize'TensorFlow.GenOps.Core
dequeueTensorFlow.Queue
descriptionProto.Tensorflow.Core.Framework.OpDef
deserializeManySparseTensorFlow.GenOps.Core
deserializeManySparse'TensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
destroyTemporaryVariable'TensorFlow.GenOps.Core
Device 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Core
2 (Type/Class)TensorFlow.Output, TensorFlow.Core
device 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
deviceCountProto.Tensorflow.Core.Protobuf.Config
deviceFiltersProto.Tensorflow.Core.Protobuf.Config
deviceNameTensorFlow.Output, TensorFlow.Core
diagTensorFlow.GenOps.Core
diag'TensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
diagPart'TensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
digamma'TensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2D'TensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropFilter'TensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dilation2DBackpropInput'TensorFlow.GenOps.Core
dimProto.Tensorflow.Core.Framework.TensorShape
divTensorFlow.GenOps.Core
div'TensorFlow.GenOps.Core
doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
docOpListTensorFlow.OpGen
doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
doubleValProto.Tensorflow.Core.Framework.Tensor
drawBoundingBoxesTensorFlow.GenOps.Core
drawBoundingBoxes'TensorFlow.GenOps.Core
drawMNISTTensorFlow.Examples.MNIST.Parse
dtypeProto.Tensorflow.Core.Framework.Tensor
DT_BFLOAT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INVALID 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
dynamicPartitionTensorFlow.GenOps.Core
dynamicPartition'TensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
dynamicStitch'TensorFlow.GenOps.Core
editDistanceTensorFlow.GenOps.Core
editDistance'TensorFlow.GenOps.Core
eluTensorFlow.GenOps.Core
elu'TensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
eluGrad'TensorFlow.GenOps.Core
embeddingLookupTensorFlow.EmbeddingOps
enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
encodeBase64TensorFlow.GenOps.Core
encodeBase64'TensorFlow.GenOps.Core
encodedAudioStringProto.Tensorflow.Core.Framework.Summary
encodedImageStringProto.Tensorflow.Core.Framework.Summary
encodeJpegTensorFlow.GenOps.Core
encodeJpeg'TensorFlow.GenOps.Core
encodeOutputTensorFlow.Build
encodePngTensorFlow.GenOps.Core
encodePng'TensorFlow.GenOps.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
encodeTFRecordsTensorFlow.Records.Conduit
enqueueTensorFlow.Queue
enterTensorFlow.GenOps.Core
enter'TensorFlow.GenOps.Core
eqLengthGuardTensorFlow.BuildOp
equal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
equal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
erfTensorFlow.GenOps.Core
erf'TensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
erfc'TensorFlow.GenOps.Core
evalBuildTTensorFlow.Build
Event 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
EventWriterTensorFlow.Logging
ExcludedCaseTensorFlow.Types
excludeListTensorFlow.OpGen
exitTensorFlow.GenOps.Core
exit'TensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
exp'TensorFlow.GenOps.Core
expandDims 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
expandDims' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
explanationProto.Tensorflow.Core.Framework.OpDef
explicitInputAttrsTensorFlow.OpGen.ParsedOp
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
expm1TensorFlow.GenOps.Core
expm1'TensorFlow.GenOps.Core
exprTensorFlow.Tensor, TensorFlow.Core
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
extractGlimpseTensorFlow.GenOps.Core
extractGlimpse'TensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
extractImagePatches'TensorFlow.GenOps.Core
fProto.Tensorflow.Core.Framework.AttrValue
factTensorFlow.GenOps.Core
fact'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
fakeQueueTensorFlow.GenOps.Core
fakeQueue'TensorFlow.GenOps.Core
Feed 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
fFTTensorFlow.GenOps.Core
fFT'TensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT2D'TensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fFT3D'TensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fIFOQueue'TensorFlow.GenOps.Core
fIFOQueueV2TensorFlow.GenOps.Core
fIFOQueueV2'TensorFlow.GenOps.Core
fileVersionProto.Tensorflow.Core.Util.Event
fill 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fill' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedLengthRecordReader'TensorFlow.GenOps.Core
fixedLengthRecordReaderV2TensorFlow.GenOps.Core
fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
flagParserTensorFlow.OpGen
floatValProto.Tensorflow.Core.Framework.Tensor
floorTensorFlow.GenOps.Core
floor'TensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorDiv'TensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
floorMod'TensorFlow.GenOps.Core
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPool'TensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalAvgPoolGrad'TensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPool'TensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolGrad'TensorFlow.GenOps.Core
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
funcProto.Tensorflow.Core.Framework.AttrValue
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNorm'TensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedBatchNormGrad'TensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedPadConv2D'TensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
gatherTensorFlow.GenOps.Core
gather'TensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
gatherNd'TensorFlow.GenOps.Core
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getSessionHandleTensorFlow.GenOps.Core
getSessionHandle'TensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
getSessionTensor'TensorFlow.GenOps.Core
getTFRecordTensorFlow.Records
getTFRecordDataTensorFlow.Records
getTFRecordLengthTensorFlow.Records
getTFRecordsTensorFlow.Records
getVarIntTensorFlow.Internal.VarInt
globalJitLevelProto.Tensorflow.Core.Protobuf.Config
GPUOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
gpuOptionsProto.Tensorflow.Core.Protobuf.Config
gradientsTensorFlow.Gradient
GraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
2 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
graphDefProto.Tensorflow.Core.Util.Event
GraphOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
graphOptionsProto.Tensorflow.Core.Protobuf.Config
GraphStateTensorFlow.Build
greaterTensorFlow.GenOps.Core
greater'TensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
greaterEqual'TensorFlow.GenOps.Core
groupTensorFlow.ControlFlow, TensorFlow.Core
halfValProto.Tensorflow.Core.Framework.Tensor
hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
hashTableTensorFlow.GenOps.Core
hashTable'TensorFlow.GenOps.Core
HaskellName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
haskellNameTensorFlow.OpGen.ParsedOp
hasMinimumProto.Tensorflow.Core.Framework.OpDef
heightProto.Tensorflow.Core.Framework.Summary
histoProto.Tensorflow.Core.Framework.Summary
HistogramProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
histogramSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
histogramSummary'TensorFlow.GenOps.Core
hoistBuildTTensorFlow.Build
hSVToRGBTensorFlow.GenOps.Core
hSVToRGB'TensorFlow.GenOps.Core
iProto.Tensorflow.Core.Framework.AttrValue
identity 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identity' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identityReaderTensorFlow.GenOps.Core
identityReader'TensorFlow.GenOps.Core
identityReaderV2TensorFlow.GenOps.Core
identityReaderV2'TensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT'TensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT2D'TensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
iFFT3D'TensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igamma'TensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
igammac'TensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imag'TensorFlow.GenOps.Core
imageProto.Tensorflow.Core.Framework.Summary
imageSummaryTensorFlow.GenOps.Core
imageSummary'TensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
immutableConst'TensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
inferredTypeAttrsTensorFlow.OpGen.ParsedOp
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedVariableTensorFlow.Ops
initializedVariable'TensorFlow.Ops
initializeTableTensorFlow.GenOps.Core
initializeTable'TensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
initializeTableFromTextFile'TensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
int64ValProto.Tensorflow.Core.Framework.Tensor
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
inTopK'TensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
inv'TensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invertPermutation'TensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
invGrad'TensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFiniteTensorFlow.GenOps.Core
isFinite'TensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isInf'TensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isNan'TensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
isVariableInitialized'TensorFlow.GenOps.Core
key 
1 (Function)Proto.Tensorflow.Core.Protobuf.Config
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
l2LossTensorFlow.GenOps.Core
l2Loss'TensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
lengthFramesProto.Tensorflow.Core.Framework.Summary
lessTensorFlow.GenOps.Core
less'TensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lessEqual'TensorFlow.GenOps.Core
levelProto.Tensorflow.Core.Util.Event
lgammaTensorFlow.GenOps.Core
lgamma'TensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linSpaceTensorFlow.GenOps.Core
linSpace'TensorFlow.GenOps.Core
ListTensorFlow.Types
listProto.Tensorflow.Core.Framework.AttrValue
ListArgTensorFlow.OpGen.ParsedOp
listDiffTensorFlow.GenOps.Core
listDiff'TensorFlow.GenOps.Core
ListOfTensorFlow.Types
logTensorFlow.GenOps.Core
log'TensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
log1p'TensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logEventTensorFlow.Logging
logicalAndTensorFlow.GenOps.Core
logicalAnd'TensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalNot'TensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logicalOr'TensorFlow.GenOps.Core
LogMessage 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
logMessageProto.Tensorflow.Core.Util.Event
LogMessage'DEBUGProto.Tensorflow.Core.Util.Event
LogMessage'ERRORProto.Tensorflow.Core.Util.Event
LogMessage'FATALProto.Tensorflow.Core.Util.Event
LogMessage'INFOProto.Tensorflow.Core.Util.Event
LogMessage'LevelProto.Tensorflow.Core.Util.Event
LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
LogMessage'WARNProto.Tensorflow.Core.Util.Event
logSoftmaxTensorFlow.GenOps.Core
logSoftmax'TensorFlow.GenOps.Core
logSummaryTensorFlow.Logging
logUniformCandidateSamplerTensorFlow.GenOps.Core
logUniformCandidateSampler'TensorFlow.GenOps.Core
lookupNodeTensorFlow.Build
lookupTableExportTensorFlow.GenOps.Core
lookupTableExport'TensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableFind'TensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableImport'TensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableInsert'TensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
lookupTableSize'TensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
loopCond'TensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRN'TensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
lRNGrad'TensorFlow.GenOps.Core
makeQueueTensorFlow.Queue
matchingFilesTensorFlow.GenOps.Core
matchingFiles'TensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matMul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixBandPart'TensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDeterminant'TensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiag'TensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixDiagPart'TensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixInverse'TensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSetDiag'TensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolve'TensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixSolveLs'TensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matrixTriangularSolve'TensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
matTranspose'TensorFlow.Ops
max 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
max'TensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maximum'TensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool'TensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3D'TensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPool3DGrad'TensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGrad'TensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradWithArgmax'TensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmax'TensorFlow.GenOps.Core
maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
maybe'audioProto.Tensorflow.Core.Framework.Summary
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'fileVersionProto.Tensorflow.Core.Util.Event
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphDefProto.Tensorflow.Core.Util.Event
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'histoProto.Tensorflow.Core.Framework.Summary
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'imageProto.Tensorflow.Core.Framework.Summary
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'logMessageProto.Tensorflow.Core.Util.Event
maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'sessionLogProto.Tensorflow.Core.Util.Event
maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'summaryProto.Tensorflow.Core.Util.Event
maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
maybe'tensor 
1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
2 (Function)Proto.Tensorflow.Core.Framework.Summary
maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
maybe'versionsProto.Tensorflow.Core.Framework.Graph
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
mean 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mean' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mergeTensorFlow.GenOps.Core
merge'TensorFlow.GenOps.Core
mergeAllSummariesTensorFlow.Logging
mergeSummaryTensorFlow.GenOps.Core
mergeSummary'TensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
mergeV2Checkpoints'TensorFlow.GenOps.Core
messageProto.Tensorflow.Core.Util.Event
metaGraphDefProto.Tensorflow.Core.Util.Event
min 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
min'TensorFlow.GenOps.Core
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
minimum'TensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPad'TensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
mirrorPadGrad'TensorFlow.GenOps.Core
MixedListArgTensorFlow.OpGen.ParsedOp
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mod'TensorFlow.GenOps.Core
MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
msgProto.Tensorflow.Core.Util.Event
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
multinomial'TensorFlow.GenOps.Core
mutableDenseHashTableTensorFlow.GenOps.Core
mutableDenseHashTable'TensorFlow.GenOps.Core
mutableHashTableTensorFlow.GenOps.Core
mutableHashTable'TensorFlow.GenOps.Core
mutableHashTableOfTensorsTensorFlow.GenOps.Core
mutableHashTableOfTensors'TensorFlow.GenOps.Core
Name 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
name 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
neg' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
negTrain'TensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nextIteration'TensorFlow.GenOps.Core
NilTensorFlow.Types
nodeProto.Tensorflow.Core.Framework.Graph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeNameProto.Tensorflow.Core.Framework.Summary
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
nonMaxSuppression'TensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
noOp'TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
notEqual'TensorFlow.GenOps.Core
numProto.Tensorflow.Core.Framework.Summary
numberAttrProto.Tensorflow.Core.Framework.OpDef
numChannelsProto.Tensorflow.Core.Framework.Summary
numThreadsProto.Tensorflow.Core.Protobuf.Config
obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
oneHot 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
oneHot' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
OneOfTensorFlow.Types, TensorFlow.Core
OneOfsTensorFlow.Types
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
OpParamsTensorFlow.BuildOp
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
OptionsTensorFlow.Session, TensorFlow.Core
optLevelProto.Tensorflow.Core.Protobuf.Config
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputTensorFlow.Output
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputNodeNameTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
pack' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
pad'TensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
paddingFIFOQueue'TensorFlow.GenOps.Core
paddingFIFOQueueV2TensorFlow.GenOps.Core
paddingFIFOQueueV2'TensorFlow.GenOps.Core
parallelConcatTensorFlow.GenOps.Core
parallelConcat'TensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parameterizedTruncatedNormal'TensorFlow.GenOps.Core
ParsedArg 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
ParsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgDescriptionTensorFlow.OpGen.ParsedOp
parsedArgNameTensorFlow.OpGen.ParsedOp
parsedInputsTensorFlow.OpGen.ParsedOp
ParsedOp 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
parsedOpDescriptionTensorFlow.OpGen.ParsedOp
parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
parsedOpNameTensorFlow.OpGen.ParsedOp
parsedOpSummaryTensorFlow.OpGen.ParsedOp
parsedOutputsTensorFlow.OpGen.ParsedOp
parseExampleTensorFlow.GenOps.Core
parseExample'TensorFlow.GenOps.Core
parseOpTensorFlow.OpGen.ParsedOp
parseSingleSequenceExampleTensorFlow.GenOps.Core
parseSingleSequenceExample'TensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
parseTensor'TensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
PendingNodeNameTensorFlow.Output
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholder' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
placeholderV2TensorFlow.GenOps.Core
placeholderV2'TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placeholderWithDefault'TensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
polygamma'TensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
pow'TensorFlow.GenOps.Core
prefixTensorFlow.OpGen
preventGradientTensorFlow.GenOps.Core
preventGradient'TensorFlow.GenOps.Core
printTensorFlow.GenOps.Core
print'TensorFlow.GenOps.Core
priorityQueueTensorFlow.GenOps.Core
priorityQueue'TensorFlow.GenOps.Core
priorityQueueV2TensorFlow.GenOps.Core
priorityQueueV2'TensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
prod'TensorFlow.GenOps.Core
protoShapeTensorFlow.Types
pureOpTensorFlow.BuildOp
PureResultTensorFlow.BuildOp
pureResultTensorFlow.BuildOp
putTFRecordTensorFlow.Records
putTFRecordDataTensorFlow.Records
putTFRecordLengthTensorFlow.Records
putVarIntTensorFlow.Internal.VarInt
qrTensorFlow.GenOps.Core
qr'TensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizeAndDequantize'TensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedAvgPool'TensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedBiasAdd'TensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConcat'TensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedConv2D'TensorFlow.GenOps.Core
quantizedInstanceNormTensorFlow.GenOps.Core
quantizedInstanceNorm'TensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMatMul'TensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizedMaxPool'TensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu'TensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedRelu6'TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReluX'TensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizedReshape'TensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
quantizeV2'TensorFlow.GenOps.Core
QueueTensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueClose'TensorFlow.GenOps.Core
queueCloseV2TensorFlow.GenOps.Core
queueCloseV2'TensorFlow.GenOps.Core
queueDequeueTensorFlow.GenOps.Core
queueDequeue'TensorFlow.GenOps.Core
queueDequeueManyTensorFlow.GenOps.Core
queueDequeueMany'TensorFlow.GenOps.Core
queueDequeueManyV2TensorFlow.GenOps.Core
queueDequeueManyV2'TensorFlow.GenOps.Core
queueDequeueUpToTensorFlow.GenOps.Core
queueDequeueUpTo'TensorFlow.GenOps.Core
queueDequeueUpToV2TensorFlow.GenOps.Core
queueDequeueUpToV2'TensorFlow.GenOps.Core
queueDequeueV2TensorFlow.GenOps.Core
queueDequeueV2'TensorFlow.GenOps.Core
queueEnqueueTensorFlow.GenOps.Core
queueEnqueue'TensorFlow.GenOps.Core
queueEnqueueManyTensorFlow.GenOps.Core
queueEnqueueMany'TensorFlow.GenOps.Core
queueEnqueueManyV2TensorFlow.GenOps.Core
queueEnqueueManyV2'TensorFlow.GenOps.Core
queueEnqueueV2TensorFlow.GenOps.Core
queueEnqueueV2'TensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
queueSize'TensorFlow.GenOps.Core
queueSizeV2TensorFlow.GenOps.Core
queueSizeV2'TensorFlow.GenOps.Core
randomCropTensorFlow.GenOps.Core
randomCrop'TensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomGamma'TensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffle'TensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomShuffleQueue'TensorFlow.GenOps.Core
randomShuffleQueueV2TensorFlow.GenOps.Core
randomShuffleQueueV2'TensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomStandardNormal'TensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniform'TensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
randomUniformInt'TensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
range' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rankTensorFlow.GenOps.Core
rank'TensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumRecordsProduced'TensorFlow.GenOps.Core
readerNumRecordsProducedV2TensorFlow.GenOps.Core
readerNumRecordsProducedV2'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerRead'TensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerReadUpTo'TensorFlow.GenOps.Core
readerReadUpToV2TensorFlow.GenOps.Core
readerReadUpToV2'TensorFlow.GenOps.Core
readerReadV2TensorFlow.GenOps.Core
readerReadV2'TensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerReset'TensorFlow.GenOps.Core
readerResetV2TensorFlow.GenOps.Core
readerResetV2'TensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerRestoreState'TensorFlow.GenOps.Core
readerRestoreStateV2TensorFlow.GenOps.Core
readerRestoreStateV2'TensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readerSerializeState'TensorFlow.GenOps.Core
readerSerializeStateV2TensorFlow.GenOps.Core
readerSerializeStateV2'TensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readFile'TensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
readVariableOpTensorFlow.GenOps.Core
readVariableOp'TensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
real'TensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
realDiv'TensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocal'TensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reciprocalGrad'TensorFlow.GenOps.Core
recordInputTensorFlow.GenOps.Core
recordInput'TensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
reduceJoin'TensorFlow.GenOps.Core
Ref 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
refEnterTensorFlow.GenOps.Core
refEnter'TensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refExit'TensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refIdentity'TensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refMerge'TensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refNextIteration'TensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSelect'TensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
refSwitch'TensorFlow.GenOps.Core
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6'TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
relu6Grad'TensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reluGrad' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
renderTensorFlow.Tensor, TensorFlow.Core
RenderedTensorFlow.Tensor
renderedTensorFlow.Tensor
renderedNodeDefsTensorFlow.Build
renderedOutputTensorFlow.Tensor
renderValueTensorFlow.Tensor
requantizationRangeTensorFlow.GenOps.Core
requantizationRange'TensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
requantize'TensorFlow.GenOps.Core
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reshape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeArea'TensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBicubic'TensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinear'TensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeBilinearGrad'TensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighbor'TensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resizeNearestNeighborGrad'TensorFlow.GenOps.Core
resourceApplyAdadeltaTensorFlow.GenOps.Core
resourceApplyAdadelta'TensorFlow.GenOps.Core
resourceApplyAdagradTensorFlow.GenOps.Core
resourceApplyAdagrad'TensorFlow.GenOps.Core
resourceApplyAdagradDATensorFlow.GenOps.Core
resourceApplyAdagradDA'TensorFlow.GenOps.Core
resourceApplyAdamTensorFlow.GenOps.Core
resourceApplyAdam'TensorFlow.GenOps.Core
resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceApplyFtrlTensorFlow.GenOps.Core
resourceApplyFtrl'TensorFlow.GenOps.Core
resourceApplyGradientDescentTensorFlow.GenOps.Core
resourceApplyGradientDescent'TensorFlow.GenOps.Core
resourceApplyMomentumTensorFlow.GenOps.Core
resourceApplyMomentum'TensorFlow.GenOps.Core
resourceApplyProximalAdagradTensorFlow.GenOps.Core
resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceApplyRMSPropTensorFlow.GenOps.Core
resourceApplyRMSProp'TensorFlow.GenOps.Core
ResourceArgTensorFlow.OpGen.ParsedOp
resourceGatherTensorFlow.GenOps.Core
resourceGather'TensorFlow.GenOps.Core
ResourceHandle 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
4 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
resourceScatterAddTensorFlow.GenOps.Core
resourceScatterAdd'TensorFlow.GenOps.Core
resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
resourceSparseApplyAdagradTensorFlow.GenOps.Core
resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyAdagradDATensorFlow.GenOps.Core
resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceSparseApplyFtrlTensorFlow.GenOps.Core
resourceSparseApplyFtrl'TensorFlow.GenOps.Core
resourceSparseApplyMomentumTensorFlow.GenOps.Core
resourceSparseApplyMomentum'TensorFlow.GenOps.Core
resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceSparseApplyRMSPropTensorFlow.GenOps.Core
resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restore'TensorFlow.GenOps.Core
restoreFromNameTensorFlow.Ops
restoreSliceTensorFlow.GenOps.Core
restoreSlice'TensorFlow.GenOps.Core
restoreV2TensorFlow.GenOps.Core
restoreV2'TensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverse'TensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseSequence'TensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
reverseV2'TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rGBToHSV'TensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
rint'TensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
round'TensorFlow.GenOps.Core
RPCOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
rpcOptionsProto.Tensorflow.Core.Protobuf.Config
rsqrtTensorFlow.GenOps.Core
rsqrt'TensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
rsqrtGrad'TensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session, TensorFlow.Core
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
runMetadataProto.Tensorflow.Core.Util.Event
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runRefTensorFlow.Tensor
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runValueTensorFlow.Tensor
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
sampleDistortedBoundingBox'TensorFlow.GenOps.Core
sampleRateProto.Tensorflow.Core.Framework.Summary
save 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
save'TensorFlow.GenOps.Core
saveSlicesTensorFlow.GenOps.Core
saveSlices'TensorFlow.GenOps.Core
saveV2TensorFlow.GenOps.Core
saveV2'TensorFlow.GenOps.Core
Scalar 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
scalarTensorFlow.Ops
scalar'TensorFlow.Ops
scalarizeTensorFlow.Ops
scalarSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
scalarSummary'TensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterAdd'TensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterDiv'TensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterMul'TensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNd'TensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdAdd'TensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdSub'TensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterNdUpdate'TensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterSub'TensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scatterUpdate'TensorFlow.GenOps.Core
scomplexValProto.Tensorflow.Core.Framework.Tensor
sdcaFprintTensorFlow.GenOps.Core
sdcaFprint'TensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaOptimizer'TensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
sdcaShrinkL1'TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMax'TensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMean'TensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentMin'TensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentProd'TensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
segmentSum'TensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
select'TensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEig'TensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
selfAdjointEigV2'TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeManySparse'TensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
serializeSparse'TensorFlow.GenOps.Core
Session 
1 (Type/Class)TensorFlow.Session, TensorFlow.Core
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
SessionLog 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
sessionLogProto.Tensorflow.Core.Util.Event
SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
SessionLog'STARTProto.Tensorflow.Core.Util.Event
SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
SessionLog'STOPProto.Tensorflow.Core.Util.Event
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
setSizeTensorFlow.GenOps.Core
setSize'TensorFlow.GenOps.Core
Shape 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
shape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
shapeNTensorFlow.GenOps.Core
shapeN'TensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilename'TensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
shardedFilespec'TensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoid'TensorFlow.GenOps.Core
sigmoidCrossEntropyWithLogitsTensorFlow.NN
sigmoidGradTensorFlow.GenOps.Core
sigmoidGrad'TensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
SimpleArgTensorFlow.OpGen.ParsedOp
simpleValueProto.Tensorflow.Core.Framework.Summary
sinTensorFlow.GenOps.Core
sin'TensorFlow.GenOps.Core
sinkTFRecordsTensorFlow.Records.Conduit
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.TensorShape
size' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
skipgramTensorFlow.GenOps.Core
skipgram'TensorFlow.GenOps.Core
sliceTensorFlow.GenOps.Core
slice'TensorFlow.GenOps.Core
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplus'TensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softplusGrad'TensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsign'TensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
softsignGrad'TensorFlow.GenOps.Core
sourceTFRecordsTensorFlow.Records.Conduit
spaceToBatchTensorFlow.GenOps.Core
spaceToBatch'TensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToBatchND'TensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
spaceToDepth'TensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAdd'TensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseAddGrad'TensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdadelta'TensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagrad'TensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyAdagradDA'TensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyFtrl'TensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyMomentum'TensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseApplyRMSProp'TensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseConcat'TensorFlow.GenOps.Core
sparseConditionalAccumulatorTensorFlow.GenOps.Core
sparseConditionalAccumulator'TensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseAdd'TensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseDiv'TensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseDenseCwiseMul'TensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseMatMul'TensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSum'TensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReduceSumSparse'TensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReorder'TensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseReshape'TensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMean'TensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentMeanGrad'TensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtN'TensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSegmentSum'TensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmax'TensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMaximum'TensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSparseMinimum'TensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseSplit'TensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseAdd'TensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseTensorDenseMatMul'TensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToDense' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToSparseSetOperationTensorFlow.GenOps.Core
sparseToSparseSetOperation'TensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
split'TensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
splitV'TensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrt'TensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
sqrtGrad'TensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
square'TensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squaredDifference'TensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
squeeze'TensorFlow.GenOps.Core
stackTensorFlow.GenOps.Core
stack'TensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackClose'TensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPop'TensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stackPush'TensorFlow.GenOps.Core
stageTensorFlow.GenOps.Core
stage'TensorFlow.GenOps.Core
statusProto.Tensorflow.Core.Util.Event
stepProto.Tensorflow.Core.Util.Event
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stopGradient'TensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSlice'TensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceAssign'TensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stridedSliceGrad'TensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringJoin'TensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringSplit'TensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucket'TensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketFast'TensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToHashBucketStrong'TensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringToNumber'TensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sub' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
substrTensorFlow.GenOps.Core
substr'TensorFlow.GenOps.Core
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.Summary
sum' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summariesTensorFlow.Build
Summary 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
summary 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Util.Event
Summary'Audio 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Image 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Value 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryTensor 
1 (Type/Class)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Logging
sumSquaresProto.Tensorflow.Core.Framework.Summary
svdTensorFlow.GenOps.Core
svd'TensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
switch'TensorFlow.GenOps.Core
tag 
1 (Function)Proto.Tensorflow.Core.Util.Event
2 (Function)Proto.Tensorflow.Core.Framework.Summary
TaggedRunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
taggedRunMetadataProto.Tensorflow.Core.Util.Event
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tan'TensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanh'TensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
tanhGrad'TensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
temporaryVariable'TensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
tensor 
1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
2 (Function)Proto.Tensorflow.Core.Framework.Summary
tensorArrayTensorFlow.GenOps.Core
tensorArray'TensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayClose'TensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayCloseV2'TensorFlow.GenOps.Core
tensorArrayCloseV3TensorFlow.GenOps.Core
tensorArrayCloseV3'TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcat'TensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayConcatV2'TensorFlow.GenOps.Core
tensorArrayConcatV3TensorFlow.GenOps.Core
tensorArrayConcatV3'TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGather'TensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGatherV2'TensorFlow.GenOps.Core
tensorArrayGatherV3TensorFlow.GenOps.Core
tensorArrayGatherV3'TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGrad'TensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayGradV2'TensorFlow.GenOps.Core
tensorArrayGradV3TensorFlow.GenOps.Core
tensorArrayGradV3'TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayPack'TensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayRead'TensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayReadV2'TensorFlow.GenOps.Core
tensorArrayReadV3TensorFlow.GenOps.Core
tensorArrayReadV3'TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatter'TensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArrayScatterV2'TensorFlow.GenOps.Core
tensorArrayScatterV3TensorFlow.GenOps.Core
tensorArrayScatterV3'TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySize'TensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySizeV2'TensorFlow.GenOps.Core
tensorArraySizeV3TensorFlow.GenOps.Core
tensorArraySizeV3'TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplit'TensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArraySplitV2'TensorFlow.GenOps.Core
tensorArraySplitV3TensorFlow.GenOps.Core
tensorArraySplitV3'TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayUnpack'TensorFlow.GenOps.Core
tensorArrayV2TensorFlow.GenOps.Core
tensorArrayV2'TensorFlow.GenOps.Core
tensorArrayV3TensorFlow.GenOps.Core
tensorArrayV3'TensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWrite'TensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorArrayWriteV2'TensorFlow.GenOps.Core
tensorArrayWriteV3TensorFlow.GenOps.Core
tensorArrayWriteV3'TensorFlow.GenOps.Core
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
tensorShapeProto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSummaryTensorFlow.GenOps.Core
tensorSummary'TensorFlow.GenOps.Core
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
textLineReaderTensorFlow.GenOps.Core
textLineReader'TensorFlow.GenOps.Core
textLineReaderV2TensorFlow.GenOps.Core
textLineReaderV2'TensorFlow.GenOps.Core
TFName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
tfNameTensorFlow.OpGen.ParsedOp
tFRecordReaderTensorFlow.GenOps.Core
tFRecordReader'TensorFlow.GenOps.Core
tFRecordReaderV2TensorFlow.GenOps.Core
tFRecordReaderV2'TensorFlow.GenOps.Core
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tile'TensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
tileGrad'TensorFlow.GenOps.Core
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
toBuildTensorFlow.Tensor
topKTensorFlow.GenOps.Core
topK'TensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
topKV2'TensorFlow.GenOps.Core
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
transpose' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateDivTensorFlow.GenOps.Core
truncateDiv'TensorFlow.GenOps.Core
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncatedNormal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateModTensorFlow.GenOps.Core
truncateMod'TensorFlow.GenOps.Core
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeHintProto.Tensorflow.Core.Framework.Summary
typeListAttrProto.Tensorflow.Core.Framework.OpDef
TypeParam 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
typeParamIsListTensorFlow.OpGen.ParsedOp
typeParamRestrictionsTensorFlow.OpGen.ParsedOp
unControlNodeTensorFlow.Output, TensorFlow.Build
unHaskellNameTensorFlow.OpGen.ParsedOp
uniformCandidateSamplerTensorFlow.GenOps.Core
uniformCandidateSampler'TensorFlow.GenOps.Core
UniqueTensorFlow.Build
uniqueTensorFlow.GenOps.Core
unique'TensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
uniqueWithCounts'TensorFlow.GenOps.Core
unknownRankProto.Tensorflow.Core.Framework.TensorShape
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unpackTensorFlow.GenOps.Core
unpack'TensorFlow.GenOps.Core
unScalarTensorFlow.Types, TensorFlow.Core
unsortedSegmentSumTensorFlow.GenOps.Core
unsortedSegmentSum'TensorFlow.GenOps.Core
unstageTensorFlow.GenOps.Core
unstage'TensorFlow.GenOps.Core
unTensorDataTensorFlow.Types
unTFNameTensorFlow.OpGen.ParsedOp
usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
Value 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
value 
1 (Function)TensorFlow.Tensor, TensorFlow.Core
2 (Function)Proto.Tensorflow.Core.Protobuf.Config
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
5 (Function)Proto.Tensorflow.Core.Framework.Summary
varHandleOpTensorFlow.GenOps.Core
varHandleOp'TensorFlow.GenOps.Core
variable 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
variable' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
variableV2TensorFlow.GenOps.Core
variableV2'TensorFlow.GenOps.Core
varIsInitializedOpTensorFlow.GenOps.Core
varIsInitializedOp'TensorFlow.GenOps.Core
vectorTensorFlow.Ops
vector'TensorFlow.Ops
version 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
versionNumberProto.Tensorflow.Core.Framework.Tensor
versionsProto.Tensorflow.Core.Framework.Graph
visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
wallTimeProto.Tensorflow.Core.Util.Event
where'TensorFlow.GenOps.Core
where''TensorFlow.GenOps.Core
wholeFileReaderTensorFlow.GenOps.Core
wholeFileReader'TensorFlow.GenOps.Core
wholeFileReaderV2TensorFlow.GenOps.Core
wholeFileReaderV2'TensorFlow.GenOps.Core
widthProto.Tensorflow.Core.Framework.Summary
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withEventWriterTensorFlow.Logging
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
writeFileTensorFlow.GenOps.Core
writeFile'TensorFlow.GenOps.Core
wtsCkptTensorFlow.Examples.MNIST.TrainedGraph
zeroInitializedVariableTensorFlow.Ops
zeroInitializedVariable'TensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zerosLike' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
zeta'TensorFlow.GenOps.Core
\\TensorFlow.Types
_ArgTensorFlow.GenOps.Core
_Arg'TensorFlow.GenOps.Core
_ArrayToListTensorFlow.GenOps.Core
_ArrayToList'TensorFlow.GenOps.Core
_AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_Event'fileVersionProto.Tensorflow.Core.Util.Event
_Event'graphDefProto.Tensorflow.Core.Util.Event
_Event'logMessageProto.Tensorflow.Core.Util.Event
_Event'metaGraphDefProto.Tensorflow.Core.Util.Event
_Event'sessionLogProto.Tensorflow.Core.Util.Event
_Event'stepProto.Tensorflow.Core.Util.Event
_Event'summaryProto.Tensorflow.Core.Util.Event
_Event'taggedRunMetadataProto.Tensorflow.Core.Util.Event
_Event'wallTimeProto.Tensorflow.Core.Util.Event
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
_HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
_HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
_HistogramProto'minProto.Tensorflow.Core.Framework.Summary
_HistogramProto'numProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
_HostCastTensorFlow.GenOps.Core
_HostCast'TensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostRecv'TensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_HostSend'TensorFlow.GenOps.Core
_ListToArrayTensorFlow.GenOps.Core
_ListToArray'TensorFlow.GenOps.Core
_LogMessage'levelProto.Tensorflow.Core.Util.Event
_LogMessage'messageProto.Tensorflow.Core.Util.Event
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_ParallelConcatStartTensorFlow.GenOps.Core
_ParallelConcatStart'TensorFlow.GenOps.Core
_ParallelConcatUpdateTensorFlow.GenOps.Core
_ParallelConcatUpdate'TensorFlow.GenOps.Core
_RecvTensorFlow.GenOps.Core
_Recv'TensorFlow.GenOps.Core
_ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_Retval'TensorFlow.GenOps.Core
_RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SendTensorFlow.GenOps.Core
_Send'TensorFlow.GenOps.Core
_SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
_SessionLog'msgProto.Tensorflow.Core.Util.Event
_SessionLog'statusProto.Tensorflow.Core.Util.Event
_Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
_Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
_Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
_Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
_Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
_Summary'valueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'audioProto.Tensorflow.Core.Framework.Summary
_Summary'Value'histoProto.Tensorflow.Core.Framework.Summary
_Summary'Value'imageProto.Tensorflow.Core.Framework.Summary
_Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
_Summary'Value'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
_Summary'Value'simpleValueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tensorProto.Tensorflow.Core.Framework.Summary
_SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
_TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
_TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file +

 

Index

/:/TensorFlow.Types
/=TensorFlow.Types, TensorFlow.Core
:/TensorFlow.Types
abortTensorFlow.GenOps.Core
abort'TensorFlow.GenOps.Core
ABORTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
abs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
abs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorApplyGradient'TensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorNumAccumulated'TensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorSetGlobalStep'TensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
accumulatorTakeGradient'TensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
acos'TensorFlow.GenOps.Core
acoshTensorFlow.GenOps.Core
acosh'TensorFlow.GenOps.Core
adamTensorFlow.Minimize
adam'TensorFlow.Minimize
adamBeta1TensorFlow.Minimize
adamBeta2TensorFlow.Minimize
AdamConfig 
1 (Data Constructor)TensorFlow.Minimize
2 (Type/Class)TensorFlow.Minimize
adamEpsilonTensorFlow.Minimize
adamLearningRateTensorFlow.Minimize
add 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
add' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build, TensorFlow.Core
addManySparseToTensorsMapTensorFlow.GenOps.Core
addManySparseToTensorsMap'TensorFlow.GenOps.Core
addN 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addN' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addNewOpTensorFlow.Build
addSparseToTensorsMapTensorFlow.GenOps.Core
addSparseToTensorsMap'TensorFlow.GenOps.Core
addSummaryTensorFlow.Tensor
adjustContrastTensorFlow.GenOps.Core
adjustContrast'TensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustContrastv2'TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
adjustHue'TensorFlow.GenOps.Core
adjustSaturationTensorFlow.GenOps.Core
adjustSaturation'TensorFlow.GenOps.Core
aliasInputPortProto.Tensorflow.Core.Framework.CostGraph
allTensorFlow.GenOps.Core
all'TensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allCandidateSampler'TensorFlow.GenOps.Core
allEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
allocatedBytesProto.Tensorflow.Core.Framework.AllocationDescription
AllocationDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AllocationDescription
2 (Type/Class)Proto.Tensorflow.Core.Framework.AllocationDescription
allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
allocationId 
1 (Function)Proto.Tensorflow.Core.Framework.LogMemory
2 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
allocatorBytesInUseProto.Tensorflow.Core.Framework.StepStats
AllocatorMemoryUsed 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
allocatorName 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.LogMemory
3 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
allocatorTypeProto.Tensorflow.Core.Protobuf.Config
allowedValues 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.KernelDef
allowGrowthProto.Tensorflow.Core.Protobuf.Config
allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
allStartMicrosProto.Tensorflow.Core.Framework.StepStats
AllTensorTypesTensorFlow.Types
ALREADY_EXISTSProto.Tensorflow.Core.Lib.Core.ErrorCodes
anyTensorFlow.GenOps.Core
any'TensorFlow.GenOps.Core
anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
anyListProto.Tensorflow.Core.Protobuf.MetaGraph
applyAdadeltaTensorFlow.GenOps.Core
applyAdadelta'TensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagrad'TensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdagradDA'TensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyAdam'TensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyCenteredRMSProp'TensorFlow.GenOps.Core
applyDelayCompensatedGradientDescentTensorFlow.GenOps.Core
applyDelayCompensatedGradientDescent'TensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyFtrl'TensorFlow.GenOps.Core
applyFtrlV2TensorFlow.GenOps.Core
applyFtrlV2'TensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyGradientDescent'TensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyMomentum'TensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalAdagrad'TensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyProximalGradientDescent'TensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
applyRMSProp'TensorFlow.GenOps.Core
approximateEqualTensorFlow.GenOps.Core
approximateEqual'TensorFlow.GenOps.Core
ArgKindTensorFlow.OpGen.ParsedOp
argKindTensorFlow.OpGen.ParsedOp
argLengthTensorFlow.OpGen.ParsedOp
argMax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMinTensorFlow.GenOps.Core
argMin'TensorFlow.GenOps.Core
ArgSomeTensorTensorFlow.OpGen.ParsedOp
ArgTensorBuildTensorFlow.OpGen.ParsedOp
ArgTensorRefTensorFlow.OpGen.ParsedOp
ArgTensorValueTensorFlow.OpGen.ParsedOp
ArgTypeTensorFlow.OpGen.ParsedOp
argTypeTensorFlow.OpGen.ParsedOp
ArgTypeAttrTensorFlow.OpGen.ParsedOp
argTypeAttrTensorFlow.OpGen.ParsedOp
ArgTypeFixedTensorFlow.OpGen.ParsedOp
argumentProto.Tensorflow.Core.Util.TestLog
asGraphDefTensorFlow.Build, TensorFlow.Core
asinTensorFlow.GenOps.Core
asin'TensorFlow.GenOps.Core
asinhTensorFlow.GenOps.Core
asinh'TensorFlow.GenOps.Core
assertTensorFlow.GenOps.Core
assert'TensorFlow.GenOps.Core
assertAllCloseTensorFlow.Test
AssetFileDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
assetFileDefProto.Tensorflow.Core.Protobuf.MetaGraph
assign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
3 (Function)TensorFlow.Ops
assign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
3 (Function)TensorFlow.Ops
assignAdd 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
assignAdd' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
assignAddVariableOpTensorFlow.GenOps.Core
assignAddVariableOp'TensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignSub'TensorFlow.GenOps.Core
assignSubVariableOpTensorFlow.GenOps.Core
assignSubVariableOp'TensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
assignVariableOp'TensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asString'TensorFlow.GenOps.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
atanTensorFlow.GenOps.Core
atan'TensorFlow.GenOps.Core
atan2TensorFlow.GenOps.Core
atan2'TensorFlow.GenOps.Core
atanhTensorFlow.GenOps.Core
atanh'TensorFlow.GenOps.Core
Attr 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
attr 
1 (Function)Proto.Tensorflow.Core.Framework.Function
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
AttrBaseTypeTensorFlow.OpGen.ParsedOp
AttrBoolTensorFlow.OpGen.ParsedOp
AttrBytesTensorFlow.OpGen.ParsedOp
attrDescriptionTensorFlow.OpGen.ParsedOp
AttrFloatTensorFlow.OpGen.ParsedOp
AttributeTensorFlow.Types
attrInfoTensorFlow.OpGen.ParsedOp
AttrInt64TensorFlow.OpGen.ParsedOp
attrLensTensorFlow.Types
AttrListTensorFlow.OpGen.ParsedOp
attrNameTensorFlow.OpGen.ParsedOp
AttrShapeTensorFlow.OpGen.ParsedOp
AttrSingleTensorFlow.OpGen.ParsedOp
AttrTensorTensorFlow.OpGen.ParsedOp
AttrType 
1 (Type/Class)TensorFlow.OpGen.ParsedOp
2 (Data Constructor)TensorFlow.OpGen.ParsedOp
AttrValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'BProto.Tensorflow.Core.Framework.AttrValue
AttrValue'FProto.Tensorflow.Core.Framework.AttrValue
AttrValue'FuncProto.Tensorflow.Core.Framework.AttrValue
AttrValue'IProto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListProto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'PlaceholderProto.Tensorflow.Core.Framework.AttrValue
AttrValue'SProto.Tensorflow.Core.Framework.AttrValue
AttrValue'ShapeProto.Tensorflow.Core.Framework.AttrValue
AttrValue'TensorProto.Tensorflow.Core.Framework.AttrValue
AttrValue'TypeProto.Tensorflow.Core.Framework.AttrValue
AttrValue'ValueProto.Tensorflow.Core.Framework.AttrValue
audioProto.Tensorflow.Core.Framework.Summary
audioSpectrogramTensorFlow.GenOps.Core
audioSpectrogram'TensorFlow.GenOps.Core
audioSummaryTensorFlow.GenOps.Core
audioSummary'TensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
audioSummaryV2'TensorFlow.GenOps.Core
autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
AutoParallelOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.RewriterConfig
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.RewriterConfig
availableProto.Tensorflow.Core.Util.TestLog
AvailableDeviceInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
availableDeviceInfoProto.Tensorflow.Core.Util.TestLog
avgPoolTensorFlow.GenOps.Core
avgPool'TensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3D'TensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPool3DGrad'TensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
avgPoolGrad'TensorFlow.GenOps.Core
bProto.Tensorflow.Core.Framework.AttrValue
backPropProto.Tensorflow.Core.Protobuf.ControlFlow
badConsumersProto.Tensorflow.Core.Framework.Versions
barrierTensorFlow.GenOps.Core
barrier'TensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierClose'TensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierIncompleteSize'TensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierInsertMany'TensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
barrierReadySize'TensorFlow.GenOps.Core
barrierTakeManyTensorFlow.GenOps.Core
barrierTakeMany'TensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholesky'TensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchCholeskyGrad'TensorFlow.GenOps.Core
batchDatasetTensorFlow.GenOps.Core
batchDataset'TensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT'TensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT2D'TensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchFFT3D'TensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT'TensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT2D'TensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchIFFT3D'TensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatMul'TensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixBandPart'TensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDeterminant'TensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiag'TensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixDiagPart'TensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixInverse'TensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSetDiag'TensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolve'TensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixSolveLs'TensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchMatrixTriangularSolve'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEig'TensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSelfAdjointEigV2'TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchSvd'TensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpace'TensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
batchToSpaceND'TensorFlow.GenOps.Core
BenchmarkEntries 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
BenchmarkEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
BenchmarkEntry'ExtrasEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
benchmarkTypeProto.Tensorflow.Core.Util.TestLog
betaincTensorFlow.GenOps.Core
betainc'TensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAdd'TensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddGrad'TensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasAddV1'TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bincountTensorFlow.GenOps.Core
bincount'TensorFlow.GenOps.Core
bitcastTensorFlow.GenOps.Core
bitcast'TensorFlow.GenOps.Core
bitsProto.Tensorflow.Core.Util.TestLog
bitwiseAndTensorFlow.GenOps.Core
bitwiseAnd'TensorFlow.GenOps.Core
bitwiseOrTensorFlow.GenOps.Core
bitwiseOr'TensorFlow.GenOps.Core
bitwiseXorTensorFlow.GenOps.Core
bitwiseXor'TensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
branchProto.Tensorflow.Core.Protobuf.ControlFlow
broadcastArgsTensorFlow.GenOps.Core
broadcastArgs'TensorFlow.GenOps.Core
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
broadcastGradientArgs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
bucketProto.Tensorflow.Core.Framework.Summary
bucketizeTensorFlow.GenOps.Core
bucketize'TensorFlow.GenOps.Core
bucketLimitProto.Tensorflow.Core.Framework.Summary
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
BuildConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
buildConfigurationProto.Tensorflow.Core.Util.TestLog
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
BuildInputsTensorFlow.BuildOp
buildInputsTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildResultTensorFlow.BuildOp
buildResultTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
BundleEntryProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorBundle
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorBundle
BundleHeaderProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorBundle
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorBundle
BundleHeaderProto'BIGProto.Tensorflow.Core.Protobuf.TensorBundle
BundleHeaderProto'EndiannessProto.Tensorflow.Core.Protobuf.TensorBundle
BundleHeaderProto'LITTLEProto.Tensorflow.Core.Protobuf.TensorBundle
busId 
1 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
2 (Function)Proto.Tensorflow.Core.Util.TestLog
BytesList 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
bytesList 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
cacheDatasetTensorFlow.GenOps.Core
cacheDataset'TensorFlow.GenOps.Core
cacheSizeProto.Tensorflow.Core.Util.TestLog
camelCaseTensorFlow.OpGen.ParsedOp
CANCELLEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
cancelOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
cast' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ccFlagsProto.Tensorflow.Core.Util.TestLog
ceilTensorFlow.GenOps.Core
ceil'TensorFlow.GenOps.Core
changelistProto.Tensorflow.Core.Util.TestLog
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
checkNumerics'TensorFlow.GenOps.Core
checkpointPathProto.Tensorflow.Core.Util.Event
choleskyTensorFlow.GenOps.Core
cholesky'TensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
choleskyGrad'TensorFlow.GenOps.Core
closeOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
ClusterDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Cluster
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Cluster
clusterDefProto.Tensorflow.Core.Protobuf.Config
CodeProto.Tensorflow.Core.Lib.Core.ErrorCodes
collectAllSummariesTensorFlow.Tensor
CollectionDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
collectionDefProto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'AnyList 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'AnyList'Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'BytesList 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'BytesList'Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'FloatList 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'FloatList'Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'Int64List 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'Int64List'Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'KindProto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'NodeList 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'NodeList'Proto.Tensorflow.Core.Protobuf.MetaGraph
colocateWithTensorFlow.Tensor, TensorFlow.Core
colorspaceProto.Tensorflow.Core.Framework.Summary
CommitId 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
commitIdProto.Tensorflow.Core.Util.TestLog
CommitId'ChangelistProto.Tensorflow.Core.Util.TestLog
CommitId'HashProto.Tensorflow.Core.Util.TestLog
CommitId'KindProto.Tensorflow.Core.Util.TestLog
complexTensorFlow.GenOps.Core
complex'TensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
complexAbs'TensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
computeAccidentalHits'TensorFlow.GenOps.Core
computeCostProto.Tensorflow.Core.Framework.CostGraph
computeTimeProto.Tensorflow.Core.Framework.CostGraph
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concat' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatenateDatasetTensorFlow.GenOps.Core
concatenateDataset'TensorFlow.GenOps.Core
concatOffsetTensorFlow.GenOps.Core
concatOffset'TensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
concatV2'TensorFlow.GenOps.Core
CondContextDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
conditionalAccumulatorTensorFlow.GenOps.Core
conditionalAccumulator'TensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
conj'TensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
const'TensorFlow.GenOps.Core
constantTensorFlow.Ops
constant'TensorFlow.Ops
constantFoldingProto.Tensorflow.Core.Protobuf.RewriterConfig
constraintProto.Tensorflow.Core.Framework.KernelDef
containerProto.Tensorflow.Core.Framework.ResourceHandle
contentProto.Tensorflow.Core.Framework.Summary
contentTypeProto.Tensorflow.Core.Framework.Summary
contextProto.Tensorflow.Core.Example.Example
contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
controlInputProto.Tensorflow.Core.Framework.CostGraph
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
controlTriggerTensorFlow.GenOps.Core
controlTrigger'TensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2D'TensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropFilter'TensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv2DBackpropInput'TensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3D'TensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilter'TensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropFilterV2'TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInput'TensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
conv3DBackpropInputV2'TensorFlow.GenOps.Core
cooSparseProto.Tensorflow.Core.Protobuf.MetaGraph
cosTensorFlow.GenOps.Core
cos'TensorFlow.GenOps.Core
coshTensorFlow.GenOps.Core
cosh'TensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
CostGraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
2 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
CostGraphDef'Node 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
2 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
CostGraphDef'Node'InputInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
2 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
CostGraphDef'Node'OutputInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
2 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
countUpToTensorFlow.GenOps.Core
countUpTo'TensorFlow.GenOps.Core
cpuGovernorProto.Tensorflow.Core.Util.TestLog
CPUInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
cpuInfoProto.Tensorflow.Core.Util.TestLog
CPUInfo'CacheSizeEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
cpuTimeProto.Tensorflow.Core.Util.TestLog
crc32cProto.Tensorflow.Core.Protobuf.TensorBundle
cropAndResizeTensorFlow.GenOps.Core
cropAndResize'TensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradBoxes'TensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
cropAndResizeGradImage'TensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cross'TensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCBeamSearchDecoder'TensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCGreedyDecoder'TensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cTCLoss'TensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumprod'TensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
cumsum'TensorFlow.GenOps.Core
data'Proto.Tensorflow.Core.Util.SavedTensorSlice
DataType 
1 (Type/Class)TensorFlow.Types
2 (Type/Class)Proto.Tensorflow.Core.Framework.Types
DATA_LOSSProto.Tensorflow.Core.Lib.Core.ErrorCodes
dcomplexValProto.Tensorflow.Core.Framework.Tensor
DEADLINE_EXCEEDEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
debugGradientIdentityTensorFlow.GenOps.Core
debugGradientIdentity'TensorFlow.GenOps.Core
debugOpsProto.Tensorflow.Core.Protobuf.Debug
DebugOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Debug
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Debug
debugOptionsProto.Tensorflow.Core.Protobuf.Config
DebugTensorWatch 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Debug
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Debug
debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Debug
debugUrlsProto.Tensorflow.Core.Protobuf.Debug
decodeBase64TensorFlow.GenOps.Core
decodeBase64'TensorFlow.GenOps.Core
decodeBmpTensorFlow.GenOps.Core
decodeBmp'TensorFlow.GenOps.Core
decodeCSVTensorFlow.GenOps.Core
decodeCSV'TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeGif'TensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJpeg'TensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodeJSONExample'TensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodePng'TensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeRaw'TensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types, TensorFlow.Core
decodeTFRecordsTensorFlow.Records.Conduit
decodeWavTensorFlow.GenOps.Core
decodeWav'TensorFlow.GenOps.Core
defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
defaultValue 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
deferredProto.Tensorflow.Core.Framework.LogMemory
deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
DeleteTensorFlow.Types
deleteSessionTensorTensorFlow.GenOps.Core
deleteSessionTensor'TensorFlow.GenOps.Core
denseShapeTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
denseToDenseSetOperationTensorFlow.GenOps.Core
denseToDenseSetOperation'TensorFlow.GenOps.Core
denseToSparseBatchDatasetTensorFlow.GenOps.Core
denseToSparseBatchDataset'TensorFlow.GenOps.Core
denseToSparseSetOperationTensorFlow.GenOps.Core
denseToSparseSetOperation'TensorFlow.GenOps.Core
deprecationProto.Tensorflow.Core.Framework.OpDef
depthToSpaceTensorFlow.GenOps.Core
depthToSpace'TensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNative'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequantize'TensorFlow.GenOps.Core
dequeueTensorFlow.Queue
descriptionProto.Tensorflow.Core.Framework.OpDef
deserializeManySparseTensorFlow.GenOps.Core
deserializeManySparse'TensorFlow.GenOps.Core
destroyResourceOpTensorFlow.GenOps.Core
destroyResourceOp'TensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
destroyTemporaryVariable'TensorFlow.GenOps.Core
Device 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Core
2 (Type/Class)TensorFlow.Output, TensorFlow.Core
device 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
4 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
DeviceAttributes 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.DeviceAttributes
2 (Type/Class)Proto.Tensorflow.Core.Framework.DeviceAttributes
deviceCountProto.Tensorflow.Core.Protobuf.Config
deviceFiltersProto.Tensorflow.Core.Protobuf.Config
deviceInfoProto.Tensorflow.Core.Util.TestLog
DeviceLocality 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.DeviceAttributes
2 (Type/Class)Proto.Tensorflow.Core.Framework.DeviceAttributes
deviceNameTensorFlow.Output, TensorFlow.Core
devicePersistentMemorySize 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
devicePersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
DeviceStepStats 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
deviceTempMemorySize 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
deviceType 
1 (Function)Proto.Tensorflow.Core.Framework.KernelDef
2 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
devStatsProto.Tensorflow.Core.Framework.StepStats
diagTensorFlow.GenOps.Core
diag'TensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
diagPart'TensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
digamma'TensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2D'TensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropFilter'TensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dilation2DBackpropInput'TensorFlow.GenOps.Core
dimProto.Tensorflow.Core.Framework.TensorShape
disableModelPruningProto.Tensorflow.Core.Protobuf.RewriterConfig
displayNameProto.Tensorflow.Core.Framework.Summary
divTensorFlow.GenOps.Core
div'TensorFlow.GenOps.Core
doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
docOpListTensorFlow.OpGen
doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
doubleValProto.Tensorflow.Core.Framework.Tensor
doubleValueProto.Tensorflow.Core.Util.TestLog
DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_Proto.Tensorflow.Core.Lib.Core.ErrorCodes
drawBoundingBoxesTensorFlow.GenOps.Core
drawBoundingBoxes'TensorFlow.GenOps.Core
drawMNISTTensorFlow.Examples.MNIST.Parse
dtype 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
3 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
4 (Function)Proto.Tensorflow.Core.Framework.CostGraph
5 (Function)Proto.Tensorflow.Core.Framework.Tensor
6 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
DT_BFLOAT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INVALID 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
dynamicPartitionTensorFlow.GenOps.Core
dynamicPartition'TensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
dynamicStitch'TensorFlow.GenOps.Core
editDistanceTensorFlow.GenOps.Core
editDistance'TensorFlow.GenOps.Core
elementProto.Tensorflow.Core.Util.MemmappedFileSystem
eluTensorFlow.GenOps.Core
elu'TensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
eluGrad'TensorFlow.GenOps.Core
embeddingLookupTensorFlow.EmbeddingOps
enableProto.Tensorflow.Core.Protobuf.RewriterConfig
enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
encodeBase64TensorFlow.GenOps.Core
encodeBase64'TensorFlow.GenOps.Core
encodedAudioStringProto.Tensorflow.Core.Framework.Summary
encodedImageStringProto.Tensorflow.Core.Framework.Summary
encodeJpegTensorFlow.GenOps.Core
encodeJpeg'TensorFlow.GenOps.Core
encodeOutputTensorFlow.Build
encodePngTensorFlow.GenOps.Core
encodePng'TensorFlow.GenOps.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
encodeTFRecordsTensorFlow.Records.Conduit
encodeWavTensorFlow.GenOps.Core
encodeWav'TensorFlow.GenOps.Core
endiannessProto.Tensorflow.Core.Protobuf.TensorBundle
enqueueTensorFlow.Queue
enqueueOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
enterTensorFlow.GenOps.Core
enter'TensorFlow.GenOps.Core
entriesProto.Tensorflow.Core.Util.TestLog
entryProto.Tensorflow.Core.Util.TestLog
EntryValue 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
EntryValue'DoubleValueProto.Tensorflow.Core.Util.TestLog
EntryValue'KindProto.Tensorflow.Core.Util.TestLog
EntryValue'StringValueProto.Tensorflow.Core.Util.TestLog
eqLengthGuardTensorFlow.BuildOp
equal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
equal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
erfTensorFlow.GenOps.Core
erf'TensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
erfc'TensorFlow.GenOps.Core
evalBuildTTensorFlow.Build
Event 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
Event'FileVersionProto.Tensorflow.Core.Util.Event
Event'GraphDefProto.Tensorflow.Core.Util.Event
Event'LogMessageProto.Tensorflow.Core.Util.Event
Event'MetaGraphDefProto.Tensorflow.Core.Util.Event
Event'SessionLogProto.Tensorflow.Core.Util.Event
Event'SummaryProto.Tensorflow.Core.Util.Event
Event'TaggedRunMetadataProto.Tensorflow.Core.Util.Event
Event'WhatProto.Tensorflow.Core.Util.Event
EventWriterTensorFlow.Logging
Example 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Example
2 (Type/Class)Proto.Tensorflow.Core.Example.Example
ExampleParserConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
ExampleParserConfiguration'FeatureMapEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
ExcludedCaseTensorFlow.Types
excludeListTensorFlow.OpGen
exitTensorFlow.GenOps.Core
exit'TensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
exp'TensorFlow.GenOps.Core
expandDims 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
expandDims' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
explanationProto.Tensorflow.Core.Framework.OpDef
explicitInputAttrsTensorFlow.OpGen.ParsedOp
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
expm1TensorFlow.GenOps.Core
expm1'TensorFlow.GenOps.Core
exprTensorFlow.Tensor, TensorFlow.Core
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
extentProto.Tensorflow.Core.Framework.TensorSlice
externalValuesProto.Tensorflow.Core.Protobuf.ControlFlow
extractGlimpseTensorFlow.GenOps.Core
extractGlimpse'TensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
extractImagePatches'TensorFlow.GenOps.Core
extrasProto.Tensorflow.Core.Util.TestLog
fProto.Tensorflow.Core.Framework.AttrValue
factTensorFlow.GenOps.Core
fact'TensorFlow.GenOps.Core
FAILED_PRECONDITIONProto.Tensorflow.Core.Lib.Core.ErrorCodes
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
fakeQueueTensorFlow.GenOps.Core
fakeQueue'TensorFlow.GenOps.Core
Feature 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featureProto.Tensorflow.Core.Example.Feature
Feature'BytesListProto.Tensorflow.Core.Example.Feature
Feature'FloatListProto.Tensorflow.Core.Example.Feature
Feature'Int64ListProto.Tensorflow.Core.Example.Feature
Feature'KindProto.Tensorflow.Core.Example.Feature
FeatureConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
FeatureConfiguration'ConfigProto.Tensorflow.Core.Example.ExampleParserConfiguration
FeatureConfiguration'FixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
FeatureConfiguration'VarLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
FeatureList 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featureListProto.Tensorflow.Core.Example.Feature
FeatureLists 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featureListsProto.Tensorflow.Core.Example.Example
FeatureLists'FeatureListEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featureMapProto.Tensorflow.Core.Example.ExampleParserConfiguration
Features 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featuresProto.Tensorflow.Core.Example.Example
Features'FeatureEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
Feed 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
fFTTensorFlow.GenOps.Core
fFT'TensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT2D'TensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fFT3D'TensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fIFOQueue'TensorFlow.GenOps.Core
fIFOQueueV2TensorFlow.GenOps.Core
fIFOQueueV2'TensorFlow.GenOps.Core
filenameProto.Tensorflow.Core.Protobuf.MetaGraph
filenameTensorNameProto.Tensorflow.Core.Protobuf.Saver
fileVersionProto.Tensorflow.Core.Util.Event
fill 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fill' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
FixedLenFeatureProto 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
fixedLengthRecordDatasetTensorFlow.GenOps.Core
fixedLengthRecordDataset'TensorFlow.GenOps.Core
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedLengthRecordReader'TensorFlow.GenOps.Core
fixedLengthRecordReaderV2TensorFlow.GenOps.Core
fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
flagParserTensorFlow.OpGen
FloatList 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
floatList 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
floatValProto.Tensorflow.Core.Framework.Tensor
floorTensorFlow.GenOps.Core
floor'TensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorDiv'TensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
floorMod'TensorFlow.GenOps.Core
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
forceGpuCompatibleProto.Tensorflow.Core.Protobuf.Config
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPool'TensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalAvgPoolGrad'TensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPool'TensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolGrad'TensorFlow.GenOps.Core
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
fullNameProto.Tensorflow.Core.Framework.Variable
fullShapeProto.Tensorflow.Core.Framework.Variable
funcProto.Tensorflow.Core.Framework.AttrValue
functionProto.Tensorflow.Core.Framework.Function
FunctionDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
FunctionDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
FunctionDef'RetEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
FunctionDefLibrary 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
functionNameProto.Tensorflow.Core.Framework.Function
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNorm'TensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedBatchNormGrad'TensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedPadConv2D'TensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
gatherTensorFlow.GenOps.Core
gather'TensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
gatherNd'TensorFlow.GenOps.Core
gatherV2TensorFlow.GenOps.Core
gatherV2'TensorFlow.GenOps.Core
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getSessionHandleTensorFlow.GenOps.Core
getSessionHandle'TensorFlow.GenOps.Core
getSessionHandleV2TensorFlow.GenOps.Core
getSessionHandleV2'TensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
getSessionTensor'TensorFlow.GenOps.Core
getTFRecordTensorFlow.Records
getTFRecordDataTensorFlow.Records
getTFRecordLengthTensorFlow.Records
getTFRecordsTensorFlow.Records
getVarIntTensorFlow.Internal.VarInt
globalJitLevelProto.Tensorflow.Core.Protobuf.Config
globalNameProto.Tensorflow.Core.Protobuf.Config
globalStepProto.Tensorflow.Core.Protobuf.Debug
GPUInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
GPUOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
gpuOptionsProto.Tensorflow.Core.Protobuf.Config
gradientProto.Tensorflow.Core.Framework.Function
GradientCompatibleTensorFlow.Gradient
GradientDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
gradientDescentTensorFlow.Minimize
gradientFuncProto.Tensorflow.Core.Framework.Function
gradientsTensorFlow.Gradient
GraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
2 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
graphDef 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Util.Event
GraphOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
graphOptionsProto.Tensorflow.Core.Protobuf.Config
GraphStateTensorFlow.Build
greaterTensorFlow.GenOps.Core
greater'TensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
greaterEqual'TensorFlow.GenOps.Core
groupTensorFlow.ControlFlow, TensorFlow.Core
halfValProto.Tensorflow.Core.Framework.Tensor
handleProto.Tensorflow.Core.Framework.LogMemory
hashProto.Tensorflow.Core.Util.TestLog
hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
hashTableTensorFlow.GenOps.Core
hashTable'TensorFlow.GenOps.Core
hashTableV2TensorFlow.GenOps.Core
hashTableV2'TensorFlow.GenOps.Core
HaskellName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
haskellNameTensorFlow.OpGen.ParsedOp
hasMinimumProto.Tensorflow.Core.Framework.OpDef
hasSingleReferenceProto.Tensorflow.Core.Framework.AllocationDescription
heightProto.Tensorflow.Core.Framework.Summary
histoProto.Tensorflow.Core.Framework.Summary
HistogramProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
histogramSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
histogramSummary'TensorFlow.GenOps.Core
hoistBuildTTensorFlow.Build
hostMemoryArgProto.Tensorflow.Core.Framework.KernelDef
hostnameProto.Tensorflow.Core.Util.TestLog
hostPersistentMemorySize 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
hostPersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
hostTempMemorySize 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
hSVToRGBTensorFlow.GenOps.Core
hSVToRGB'TensorFlow.GenOps.Core
iProto.Tensorflow.Core.Framework.AttrValue
idProto.Tensorflow.Core.Framework.CostGraph
identity 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identity' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identityReaderTensorFlow.GenOps.Core
identityReader'TensorFlow.GenOps.Core
identityReaderV2TensorFlow.GenOps.Core
identityReaderV2'TensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT'TensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT2D'TensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
iFFT3D'TensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igamma'TensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
igammac'TensorFlow.GenOps.Core
ignoreErrorsDatasetTensorFlow.GenOps.Core
ignoreErrorsDataset'TensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imag'TensorFlow.GenOps.Core
imageProto.Tensorflow.Core.Framework.Summary
imageSummaryTensorFlow.GenOps.Core
imageSummary'TensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
immutableConst'TensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
incarnationProto.Tensorflow.Core.Framework.DeviceAttributes
indexProto.Tensorflow.Core.Framework.LogMemory
indicesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
indicesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
inferredTypeAttrsTensorFlow.OpGen.ParsedOp
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedValueTensorFlow.Variable
initializedVariable 
1 (Function)TensorFlow.Variable
2 (Function)TensorFlow.Ops
initializedVariable' 
1 (Function)TensorFlow.Variable
2 (Function)TensorFlow.Ops
initializerNameProto.Tensorflow.Core.Framework.Variable
initializeTableTensorFlow.GenOps.Core
initializeTable'TensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
initializeTableFromTextFile'TensorFlow.GenOps.Core
initializeTableFromTextFileV2TensorFlow.GenOps.Core
initializeTableFromTextFileV2'TensorFlow.GenOps.Core
initializeTableV2TensorFlow.GenOps.Core
initializeTableV2'TensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
inputInfoProto.Tensorflow.Core.Framework.CostGraph
inputsProto.Tensorflow.Core.Protobuf.MetaGraph
Int64List 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
int64List 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
int64ValProto.Tensorflow.Core.Framework.Tensor
INTERNALProto.Tensorflow.Core.Lib.Core.ErrorCodes
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
inTopK'TensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
inv'TensorFlow.GenOps.Core
INVALID_ARGUMENTProto.Tensorflow.Core.Lib.Core.ErrorCodes
invertTensorFlow.GenOps.Core
invert'TensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invertPermutation'TensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
invGrad'TensorFlow.GenOps.Core
iRFFTTensorFlow.GenOps.Core
iRFFT'TensorFlow.GenOps.Core
iRFFT2DTensorFlow.GenOps.Core
iRFFT2D'TensorFlow.GenOps.Core
iRFFT3DTensorFlow.GenOps.Core
iRFFT3D'TensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFinalProto.Tensorflow.Core.Framework.CostGraph
isFiniteTensorFlow.GenOps.Core
isFinite'TensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isInf'TensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isNan'TensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isResourceProto.Tensorflow.Core.Framework.Variable
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
isVariableInitialized'TensorFlow.GenOps.Core
iteratorTensorFlow.GenOps.Core
iterator'TensorFlow.GenOps.Core
iteratorDisposeTensorFlow.GenOps.Core
iteratorDispose'TensorFlow.GenOps.Core
iteratorFromStringHandleTensorFlow.GenOps.Core
iteratorFromStringHandle'TensorFlow.GenOps.Core
iteratorGetNextTensorFlow.GenOps.Core
iteratorGetNext'TensorFlow.GenOps.Core
iteratorToStringHandleTensorFlow.GenOps.Core
iteratorToStringHandle'TensorFlow.GenOps.Core
itersProto.Tensorflow.Core.Util.TestLog
jobProto.Tensorflow.Core.Protobuf.Cluster
JobDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Cluster
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Cluster
JobDef'TasksEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Cluster
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Cluster
jobNameProto.Tensorflow.Core.Protobuf.TensorflowServer
keepCheckpointEveryNHoursProto.Tensorflow.Core.Protobuf.Saver
KernelDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.KernelDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.KernelDef
KernelDef'AttrConstraint 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.KernelDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.KernelDef
kernelNameProto.Tensorflow.Core.Framework.LogMemory
key 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Example.Feature
3 (Function)Proto.Tensorflow.Core.Protobuf.Config
4 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
5 (Function)Proto.Tensorflow.Core.Framework.Function
6 (Function)Proto.Tensorflow.Core.Framework.NodeDef
7 (Function)Proto.Tensorflow.Core.Framework.AttrValue
8 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
9 (Function)Proto.Tensorflow.Core.Protobuf.ControlFlow
10 (Function)Proto.Tensorflow.Core.Util.TestLog
l2LossTensorFlow.GenOps.Core
l2Loss'TensorFlow.GenOps.Core
labelProto.Tensorflow.Core.Framework.KernelDef
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
lengthProto.Tensorflow.Core.Framework.TensorSlice
lengthFramesProto.Tensorflow.Core.Framework.Summary
lessTensorFlow.GenOps.Core
less'TensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lessEqual'TensorFlow.GenOps.Core
levelProto.Tensorflow.Core.Util.Event
lgammaTensorFlow.GenOps.Core
lgamma'TensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linkageProto.Tensorflow.Core.Util.TestLog
linSpaceTensorFlow.GenOps.Core
linSpace'TensorFlow.GenOps.Core
ListTensorFlow.Types
listProto.Tensorflow.Core.Framework.AttrValue
ListArgTensorFlow.OpGen.ParsedOp
listDiffTensorFlow.GenOps.Core
listDiff'TensorFlow.GenOps.Core
ListOfTensorFlow.Types
liveBytesProto.Tensorflow.Core.Framework.StepStats
lMDBReaderTensorFlow.GenOps.Core
lMDBReader'TensorFlow.GenOps.Core
localityProto.Tensorflow.Core.Framework.DeviceAttributes
logTensorFlow.GenOps.Core
log'TensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
log1p'TensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logEventTensorFlow.Logging
logGraphTensorFlow.Logging
logicalAndTensorFlow.GenOps.Core
logicalAnd'TensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalNot'TensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logicalOr'TensorFlow.GenOps.Core
LogMessage 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
logMessageProto.Tensorflow.Core.Util.Event
LogMessage'DEBUGGINGProto.Tensorflow.Core.Util.Event
LogMessage'ERRORProto.Tensorflow.Core.Util.Event
LogMessage'FATALProto.Tensorflow.Core.Util.Event
LogMessage'INFOProto.Tensorflow.Core.Util.Event
LogMessage'LevelProto.Tensorflow.Core.Util.Event
LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
LogMessage'WARNProto.Tensorflow.Core.Util.Event
logSoftmaxTensorFlow.GenOps.Core
logSoftmax'TensorFlow.GenOps.Core
logSummaryTensorFlow.Logging
logUniformCandidateSamplerTensorFlow.GenOps.Core
logUniformCandidateSampler'TensorFlow.GenOps.Core
lookupNodeTensorFlow.Build
lookupTableExportTensorFlow.GenOps.Core
lookupTableExport'TensorFlow.GenOps.Core
lookupTableExportV2TensorFlow.GenOps.Core
lookupTableExportV2'TensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableFind'TensorFlow.GenOps.Core
lookupTableFindV2TensorFlow.GenOps.Core
lookupTableFindV2'TensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableImport'TensorFlow.GenOps.Core
lookupTableImportV2TensorFlow.GenOps.Core
lookupTableImportV2'TensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableInsert'TensorFlow.GenOps.Core
lookupTableInsertV2TensorFlow.GenOps.Core
lookupTableInsertV2'TensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
lookupTableSize'TensorFlow.GenOps.Core
lookupTableSizeV2TensorFlow.GenOps.Core
lookupTableSizeV2'TensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
loopCond'TensorFlow.GenOps.Core
loopEnterNamesProto.Tensorflow.Core.Protobuf.ControlFlow
loopExitNamesProto.Tensorflow.Core.Protobuf.ControlFlow
lRNTensorFlow.GenOps.Core
lRN'TensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
lRNGrad'TensorFlow.GenOps.Core
machineProto.Tensorflow.Core.Util.TestLog
MachineConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
machineConfigurationProto.Tensorflow.Core.Util.TestLog
makeIteratorTensorFlow.GenOps.Core
makeIterator'TensorFlow.GenOps.Core
makeQueueTensorFlow.Queue
mapClearTensorFlow.GenOps.Core
mapClear'TensorFlow.GenOps.Core
mapIncompleteSizeTensorFlow.GenOps.Core
mapIncompleteSize'TensorFlow.GenOps.Core
mapPeekTensorFlow.GenOps.Core
mapPeek'TensorFlow.GenOps.Core
mapSizeTensorFlow.GenOps.Core
mapSize'TensorFlow.GenOps.Core
mapStageTensorFlow.GenOps.Core
mapStage'TensorFlow.GenOps.Core
mapUnstageTensorFlow.GenOps.Core
mapUnstage'TensorFlow.GenOps.Core
mapUnstageNoKeyTensorFlow.GenOps.Core
mapUnstageNoKey'TensorFlow.GenOps.Core
matchingFilesTensorFlow.GenOps.Core
matchingFiles'TensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matMul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixBandPart'TensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDeterminant'TensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiag'TensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixDiagPart'TensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixInverse'TensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSetDiag'TensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolve'TensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixSolveLs'TensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matrixTriangularSolve'TensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
matTranspose'TensorFlow.Ops
max 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
max'TensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maximum'TensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool'TensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3D'TensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPool3DGrad'TensorFlow.GenOps.Core
maxPool3DGradGradTensorFlow.GenOps.Core
maxPool3DGradGrad'TensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGrad'TensorFlow.GenOps.Core
maxPoolGradGradTensorFlow.GenOps.Core
maxPoolGradGrad'TensorFlow.GenOps.Core
maxPoolGradGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradGradWithArgmax'TensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradWithArgmax'TensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmax'TensorFlow.GenOps.Core
maxToKeepProto.Tensorflow.Core.Protobuf.Saver
maybe'allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
maybe'allowedValues 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.KernelDef
maybe'anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'anyListProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'audioProto.Tensorflow.Core.Framework.Summary
maybe'autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'buildConfigurationProto.Tensorflow.Core.Util.TestLog
maybe'bytesList 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
maybe'changelistProto.Tensorflow.Core.Util.TestLog
maybe'clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
maybe'clusterDefProto.Tensorflow.Core.Protobuf.Config
maybe'commitIdProto.Tensorflow.Core.Util.TestLog
maybe'configProto.Tensorflow.Core.Example.ExampleParserConfiguration
maybe'contextProto.Tensorflow.Core.Example.Example
maybe'cooSparseProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'cpuInfoProto.Tensorflow.Core.Util.TestLog
maybe'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
maybe'defaultValue 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'doubleValueProto.Tensorflow.Core.Util.TestLog
maybe'encodingProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'entriesProto.Tensorflow.Core.Util.TestLog
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'featureListsProto.Tensorflow.Core.Example.Example
maybe'featuresProto.Tensorflow.Core.Example.Example
maybe'fileVersionProto.Tensorflow.Core.Util.Event
maybe'fixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
maybe'floatList 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphDef 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Util.Event
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'hashProto.Tensorflow.Core.Util.TestLog
maybe'hasLengthProto.Tensorflow.Core.Framework.TensorSlice
maybe'histoProto.Tensorflow.Core.Framework.Summary
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'imageProto.Tensorflow.Core.Framework.Summary
maybe'int64List 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
maybe'kind 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
3 (Function)Proto.Tensorflow.Core.Util.TestLog
maybe'lengthProto.Tensorflow.Core.Framework.TensorSlice
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'localityProto.Tensorflow.Core.Framework.DeviceAttributes
maybe'logMessageProto.Tensorflow.Core.Util.Event
maybe'machineConfigurationProto.Tensorflow.Core.Util.TestLog
maybe'memoryInfoProto.Tensorflow.Core.Util.TestLog
maybe'memoryStatsProto.Tensorflow.Core.Framework.StepStats
maybe'metaProto.Tensorflow.Core.Util.SavedTensorSlice
maybe'metadataProto.Tensorflow.Core.Framework.Summary
maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
maybe'metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'nameProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'nodeListProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'platformInfoProto.Tensorflow.Core.Util.TestLog
maybe'pluginDataProto.Tensorflow.Core.Framework.Summary
maybe'rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'runConfigurationProto.Tensorflow.Core.Util.TestLog
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
maybe'sessionLogProto.Tensorflow.Core.Util.Event
maybe'shape 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.CostGraph
5 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
6 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
maybe'signatureProto.Tensorflow.Core.Framework.Function
maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
maybe'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'stringValueProto.Tensorflow.Core.Util.TestLog
maybe'strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'summaryProto.Tensorflow.Core.Util.Event
maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
maybe'tensor 
1 (Function)Proto.Tensorflow.Core.Framework.LogMemory
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
3 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
4 (Function)Proto.Tensorflow.Core.Framework.Summary
maybe'tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
maybe'tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'tensorShape 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Example.Feature
3 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
4 (Function)Proto.Tensorflow.Core.Framework.Function
5 (Function)Proto.Tensorflow.Core.Framework.NodeDef
6 (Function)Proto.Tensorflow.Core.Framework.AttrValue
7 (Function)Proto.Tensorflow.Core.Framework.Summary
8 (Function)Proto.Tensorflow.Core.Util.TestLog
maybe'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
maybe'varLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
maybe'versionProto.Tensorflow.Core.Protobuf.TensorBundle
maybe'versions 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
maybe'whatProto.Tensorflow.Core.Util.Event
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
mean 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mean' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
MemmappedFileSystemDirectory 
1 (Data Constructor)Proto.Tensorflow.Core.Util.MemmappedFileSystem
2 (Type/Class)Proto.Tensorflow.Core.Util.MemmappedFileSystem
MemmappedFileSystemDirectoryElement 
1 (Data Constructor)Proto.Tensorflow.Core.Util.MemmappedFileSystem
2 (Type/Class)Proto.Tensorflow.Core.Util.MemmappedFileSystem
memoryProto.Tensorflow.Core.Framework.StepStats
MemoryInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
memoryInfoProto.Tensorflow.Core.Util.TestLog
memoryLimit 
1 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
2 (Function)Proto.Tensorflow.Core.Util.TestLog
MemoryLogRawAllocation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogRawDeallocation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogStep 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogTensorAllocation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogTensorDeallocation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogTensorOutput 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
memoryOptimizationProto.Tensorflow.Core.Protobuf.RewriterConfig
MemoryStats 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
memoryStatsProto.Tensorflow.Core.Framework.StepStats
memoryTimeProto.Tensorflow.Core.Framework.CostGraph
mergeTensorFlow.GenOps.Core
merge'TensorFlow.GenOps.Core
mergeAllSummariesTensorFlow.Logging
mergeSummaryTensorFlow.GenOps.Core
mergeSummary'TensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
mergeV2Checkpoints'TensorFlow.GenOps.Core
messageProto.Tensorflow.Core.Util.Event
metaProto.Tensorflow.Core.Util.SavedTensorSlice
metadataProto.Tensorflow.Core.Framework.Summary
MetaGraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
metaGraphDefProto.Tensorflow.Core.Util.Event
MetaGraphDef'CollectionDefEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
MetaGraphDef'MetaInfoDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
MetaGraphDef'SignatureDefEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
metaGraphsProto.Tensorflow.Core.Protobuf.SavedModel
metaGraphVersionProto.Tensorflow.Core.Protobuf.MetaGraph
metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
methodNameProto.Tensorflow.Core.Protobuf.MetaGraph
mfccTensorFlow.GenOps.Core
mfcc'TensorFlow.GenOps.Core
mhzPerCpuProto.Tensorflow.Core.Util.TestLog
min 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
min'TensorFlow.GenOps.Core
minConsumerProto.Tensorflow.Core.Framework.Versions
MinimizerTensorFlow.Minimize
minimizeWithTensorFlow.Minimize
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
minimum'TensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPad'TensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
mirrorPadGrad'TensorFlow.GenOps.Core
MixedListArgTensorFlow.OpGen.ParsedOp
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mod'TensorFlow.GenOps.Core
modeProto.Tensorflow.Core.Util.TestLog
modelProto.Tensorflow.Core.Util.TestLog
MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
msgProto.Tensorflow.Core.Util.Event
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
multinomial'TensorFlow.GenOps.Core
mutableDenseHashTableTensorFlow.GenOps.Core
mutableDenseHashTable'TensorFlow.GenOps.Core
mutableDenseHashTableV2TensorFlow.GenOps.Core
mutableDenseHashTableV2'TensorFlow.GenOps.Core
mutableHashTableTensorFlow.GenOps.Core
mutableHashTable'TensorFlow.GenOps.Core
mutableHashTableOfTensorsTensorFlow.GenOps.Core
mutableHashTableOfTensors'TensorFlow.GenOps.Core
mutableHashTableOfTensorsV2TensorFlow.GenOps.Core
mutableHashTableOfTensorsV2'TensorFlow.GenOps.Core
mutableHashTableV2TensorFlow.GenOps.Core
mutableHashTableV2'TensorFlow.GenOps.Core
Name 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
name 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.KernelDef
5 (Function)Proto.Tensorflow.Core.Framework.AttrValue
6 (Function)Proto.Tensorflow.Core.Framework.CostGraph
7 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
8 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
9 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
10 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
11 (Function)Proto.Tensorflow.Core.Framework.TensorShape
12 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
13 (Function)Proto.Tensorflow.Core.Util.MemmappedFileSystem
14 (Function)Proto.Tensorflow.Core.Util.TestLog
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NamedTensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.NamedTensor
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.NamedTensor
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
neg' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
negTrain'TensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nextIteration'TensorFlow.GenOps.Core
NilTensorFlow.Types
node 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
nodeDefProto.Tensorflow.Core.Framework.Function
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeExecStats 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
nodeListProto.Tensorflow.Core.Protobuf.MetaGraph
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeName 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.Summary
3 (Function)Proto.Tensorflow.Core.Protobuf.Debug
NodeOutput 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
NodesTensorFlow.Nodes, TensorFlow.Core
nodeStatsProto.Tensorflow.Core.Framework.StepStats
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
nonMaxSuppression'TensorFlow.GenOps.Core
nonMaxSuppressionV2TensorFlow.GenOps.Core
nonMaxSuppressionV2'TensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
noOp'TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
notEqual'TensorFlow.GenOps.Core
NOT_FOUNDProto.Tensorflow.Core.Lib.Core.ErrorCodes
numProto.Tensorflow.Core.Framework.Summary
numberAttrProto.Tensorflow.Core.Framework.OpDef
numBytesProto.Tensorflow.Core.Framework.LogMemory
numChannelsProto.Tensorflow.Core.Framework.Summary
numCoresProto.Tensorflow.Core.Util.TestLog
numCoresAllowedProto.Tensorflow.Core.Util.TestLog
numReplicasProto.Tensorflow.Core.Protobuf.RewriterConfig
numShardsProto.Tensorflow.Core.Protobuf.TensorBundle
numThreadsProto.Tensorflow.Core.Protobuf.Config
obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
offset 
1 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
2 (Function)Proto.Tensorflow.Core.Util.MemmappedFileSystem
OKProto.Tensorflow.Core.Lib.Core.ErrorCodes
oneHot 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
oneHot' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
OneOfTensorFlow.Types, TensorFlow.Core
OneOfsTensorFlow.Types
onesLikeTensorFlow.GenOps.Core
onesLike'TensorFlow.GenOps.Core
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.KernelDef
opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
operationProto.Tensorflow.Core.Framework.LogMemory
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
OpParamsTensorFlow.BuildOp
opStartRelMicrosProto.Tensorflow.Core.Framework.StepStats
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
optimizersProto.Tensorflow.Core.Protobuf.RewriterConfig
optimizeTensorLayoutProto.Tensorflow.Core.Protobuf.RewriterConfig
OptionsTensorFlow.Session, TensorFlow.Core
optLevelProto.Tensorflow.Core.Protobuf.Config
optsProto.Tensorflow.Core.Util.TestLog
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
orderedMapClearTensorFlow.GenOps.Core
orderedMapClear'TensorFlow.GenOps.Core
orderedMapIncompleteSizeTensorFlow.GenOps.Core
orderedMapIncompleteSize'TensorFlow.GenOps.Core
orderedMapPeekTensorFlow.GenOps.Core
orderedMapPeek'TensorFlow.GenOps.Core
orderedMapSizeTensorFlow.GenOps.Core
orderedMapSize'TensorFlow.GenOps.Core
orderedMapStageTensorFlow.GenOps.Core
orderedMapStage'TensorFlow.GenOps.Core
orderedMapUnstageTensorFlow.GenOps.Core
orderedMapUnstage'TensorFlow.GenOps.Core
orderedMapUnstageNoKeyTensorFlow.GenOps.Core
orderedMapUnstageNoKey'TensorFlow.GenOps.Core
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
output 
1 (Function)TensorFlow.Output
2 (Function)Proto.Tensorflow.Core.Framework.StepStats
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
outputInfoProto.Tensorflow.Core.Framework.CostGraph
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputNodeNameTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
outputsProto.Tensorflow.Core.Protobuf.MetaGraph
outputSlotProto.Tensorflow.Core.Protobuf.Debug
OUT_OF_RANGEProto.Tensorflow.Core.Lib.Core.ErrorCodes
pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
pack' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
pad'TensorFlow.GenOps.Core
paddedBatchDatasetTensorFlow.GenOps.Core
paddedBatchDataset'TensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
paddingFIFOQueue'TensorFlow.GenOps.Core
paddingFIFOQueueV2TensorFlow.GenOps.Core
paddingFIFOQueueV2'TensorFlow.GenOps.Core
padV2TensorFlow.GenOps.Core
padV2'TensorFlow.GenOps.Core
parallelConcatTensorFlow.GenOps.Core
parallelConcat'TensorFlow.GenOps.Core
parallelIterationsProto.Tensorflow.Core.Protobuf.ControlFlow
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parameterizedTruncatedNormal'TensorFlow.GenOps.Core
ParsedArg 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
ParsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgDescriptionTensorFlow.OpGen.ParsedOp
parsedArgNameTensorFlow.OpGen.ParsedOp
parsedInputsTensorFlow.OpGen.ParsedOp
ParsedOp 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
parsedOpDescriptionTensorFlow.OpGen.ParsedOp
parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
parsedOpNameTensorFlow.OpGen.ParsedOp
parsedOpSummaryTensorFlow.OpGen.ParsedOp
parsedOutputsTensorFlow.OpGen.ParsedOp
parseExampleTensorFlow.GenOps.Core
parseExample'TensorFlow.GenOps.Core
parseOpTensorFlow.OpGen.ParsedOp
parseSingleSequenceExampleTensorFlow.GenOps.Core
parseSingleSequenceExample'TensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
parseTensor'TensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
peakBytesProto.Tensorflow.Core.Framework.StepStats
PendingNodeNameTensorFlow.Output
PERMISSION_DENIEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
physicalDescriptionProto.Tensorflow.Core.Util.TestLog
physicalDeviceDescProto.Tensorflow.Core.Framework.DeviceAttributes
pivotForBodyNameProto.Tensorflow.Core.Protobuf.ControlFlow
pivotForPredNameProto.Tensorflow.Core.Protobuf.ControlFlow
pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholder' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
placeholderV2TensorFlow.GenOps.Core
placeholderV2'TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placeholderWithDefault'TensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
PlatformInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
platformInfoProto.Tensorflow.Core.Util.TestLog
pluginDataProto.Tensorflow.Core.Framework.Summary
pluginNameProto.Tensorflow.Core.Framework.Summary
pollingActiveDelayUsecsProto.Tensorflow.Core.Protobuf.Config
pollingInactiveDelayMsecsProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
polygamma'TensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
pow'TensorFlow.GenOps.Core
precedingNodeProto.Tensorflow.Core.Framework.CostGraph
precedingPortProto.Tensorflow.Core.Framework.CostGraph
predNameProto.Tensorflow.Core.Protobuf.ControlFlow
prefixTensorFlow.OpGen
preventGradientTensorFlow.GenOps.Core
preventGradient'TensorFlow.GenOps.Core
printTensorFlow.GenOps.Core
print'TensorFlow.GenOps.Core
priorityQueueTensorFlow.GenOps.Core
priorityQueue'TensorFlow.GenOps.Core
priorityQueueV2TensorFlow.GenOps.Core
priorityQueueV2'TensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
prod'TensorFlow.GenOps.Core
producerProto.Tensorflow.Core.Framework.Versions
protocolProto.Tensorflow.Core.Protobuf.TensorflowServer
protoShapeTensorFlow.Types
ptr 
1 (Function)Proto.Tensorflow.Core.Framework.LogMemory
2 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
pureOpTensorFlow.BuildOp
PureResultTensorFlow.BuildOp
pureResultTensorFlow.BuildOp
putTFRecordTensorFlow.Records
putTFRecordDataTensorFlow.Records
putTFRecordLengthTensorFlow.Records
putVarIntTensorFlow.Internal.VarInt
qrTensorFlow.GenOps.Core
qr'TensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizeAndDequantize'TensorFlow.GenOps.Core
quantizeAndDequantizeV2TensorFlow.GenOps.Core
quantizeAndDequantizeV2'TensorFlow.GenOps.Core
quantizeAndDequantizeV3TensorFlow.GenOps.Core
quantizeAndDequantizeV3'TensorFlow.GenOps.Core
quantizedAddTensorFlow.GenOps.Core
quantizedAdd'TensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedAvgPool'TensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedBiasAdd'TensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConcat'TensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedConv2D'TensorFlow.GenOps.Core
quantizedInstanceNormTensorFlow.GenOps.Core
quantizedInstanceNorm'TensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMatMul'TensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizedMaxPool'TensorFlow.GenOps.Core
quantizedMulTensorFlow.GenOps.Core
quantizedMul'TensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu'TensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedRelu6'TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReluX'TensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizedReshape'TensorFlow.GenOps.Core
quantizedResizeBilinearTensorFlow.GenOps.Core
quantizedResizeBilinear'TensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
quantizeV2'TensorFlow.GenOps.Core
QueueTensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueClose'TensorFlow.GenOps.Core
queueClosedExceptionTypesProto.Tensorflow.Core.Protobuf.QueueRunner
queueCloseV2TensorFlow.GenOps.Core
queueCloseV2'TensorFlow.GenOps.Core
queueDequeueTensorFlow.GenOps.Core
queueDequeue'TensorFlow.GenOps.Core
queueDequeueManyTensorFlow.GenOps.Core
queueDequeueMany'TensorFlow.GenOps.Core
queueDequeueManyV2TensorFlow.GenOps.Core
queueDequeueManyV2'TensorFlow.GenOps.Core
queueDequeueUpToTensorFlow.GenOps.Core
queueDequeueUpTo'TensorFlow.GenOps.Core
queueDequeueUpToV2TensorFlow.GenOps.Core
queueDequeueUpToV2'TensorFlow.GenOps.Core
queueDequeueV2TensorFlow.GenOps.Core
queueDequeueV2'TensorFlow.GenOps.Core
queueEnqueueTensorFlow.GenOps.Core
queueEnqueue'TensorFlow.GenOps.Core
queueEnqueueManyTensorFlow.GenOps.Core
queueEnqueueMany'TensorFlow.GenOps.Core
queueEnqueueManyV2TensorFlow.GenOps.Core
queueEnqueueManyV2'TensorFlow.GenOps.Core
queueEnqueueV2TensorFlow.GenOps.Core
queueEnqueueV2'TensorFlow.GenOps.Core
queueIsClosedTensorFlow.GenOps.Core
queueIsClosed'TensorFlow.GenOps.Core
queueIsClosedV2TensorFlow.GenOps.Core
queueIsClosedV2'TensorFlow.GenOps.Core
queueNameProto.Tensorflow.Core.Protobuf.QueueRunner
QueueRunnerDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.QueueRunner
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.QueueRunner
queueSizeTensorFlow.GenOps.Core
queueSize'TensorFlow.GenOps.Core
queueSizeV2TensorFlow.GenOps.Core
queueSizeV2'TensorFlow.GenOps.Core
randomCropTensorFlow.GenOps.Core
randomCrop'TensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomGamma'TensorFlow.GenOps.Core
randomPoissonTensorFlow.GenOps.Core
randomPoisson'TensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffle'TensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomShuffleQueue'TensorFlow.GenOps.Core
randomShuffleQueueV2TensorFlow.GenOps.Core
randomShuffleQueueV2'TensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomStandardNormal'TensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniform'TensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
randomUniformInt'TensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
range' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rangeDatasetTensorFlow.GenOps.Core
rangeDataset'TensorFlow.GenOps.Core
rankTensorFlow.GenOps.Core
rank'TensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumRecordsProduced'TensorFlow.GenOps.Core
readerNumRecordsProducedV2TensorFlow.GenOps.Core
readerNumRecordsProducedV2'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerRead'TensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerReadUpTo'TensorFlow.GenOps.Core
readerReadUpToV2TensorFlow.GenOps.Core
readerReadUpToV2'TensorFlow.GenOps.Core
readerReadV2TensorFlow.GenOps.Core
readerReadV2'TensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerReset'TensorFlow.GenOps.Core
readerResetV2TensorFlow.GenOps.Core
readerResetV2'TensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerRestoreState'TensorFlow.GenOps.Core
readerRestoreStateV2TensorFlow.GenOps.Core
readerRestoreStateV2'TensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readerSerializeState'TensorFlow.GenOps.Core
readerSerializeStateV2TensorFlow.GenOps.Core
readerSerializeStateV2'TensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readFile'TensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
readValueTensorFlow.Variable
readVariableOpTensorFlow.GenOps.Core
readVariableOp'TensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
real'TensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
realDiv'TensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocal'TensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reciprocalGrad'TensorFlow.GenOps.Core
recordInputTensorFlow.GenOps.Core
recordInput'TensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
reduceJoin'TensorFlow.GenOps.Core
reduceMeanTensorFlow.Ops
reduceMean'TensorFlow.Ops
reduceSumTensorFlow.Ops
reduceSum'TensorFlow.Ops
Ref 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
refEnterTensorFlow.GenOps.Core
refEnter'TensorFlow.GenOps.Core
referencedTensorProto.Tensorflow.Core.Framework.StepStats
refExitTensorFlow.GenOps.Core
refExit'TensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refIdentity'TensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refMerge'TensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refNextIteration'TensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSelect'TensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
refSwitch'TensorFlow.GenOps.Core
releaseProto.Tensorflow.Core.Util.TestLog
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6'TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
relu6Grad'TensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reluGrad' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
remoteFusedGraphExecuteTensorFlow.GenOps.Core
remoteFusedGraphExecute'TensorFlow.GenOps.Core
renderTensorFlow.Tensor, TensorFlow.Core
RenderedTensorFlow.Tensor
renderedNodeDefsTensorFlow.Build
renderedOutputTensorFlow.Tensor
renderValueTensorFlow.Tensor
repeatDatasetTensorFlow.GenOps.Core
repeatDataset'TensorFlow.GenOps.Core
requantizationRangeTensorFlow.GenOps.Core
requantizationRange'TensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
requantize'TensorFlow.GenOps.Core
requestedBytesProto.Tensorflow.Core.Framework.AllocationDescription
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reshape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeArea'TensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBicubic'TensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinear'TensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeBilinearGrad'TensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighbor'TensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resizeNearestNeighborGrad'TensorFlow.GenOps.Core
resourceApplyAdadeltaTensorFlow.GenOps.Core
resourceApplyAdadelta'TensorFlow.GenOps.Core
resourceApplyAdagradTensorFlow.GenOps.Core
resourceApplyAdagrad'TensorFlow.GenOps.Core
resourceApplyAdagradDATensorFlow.GenOps.Core
resourceApplyAdagradDA'TensorFlow.GenOps.Core
resourceApplyAdam 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
resourceApplyAdam' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceApplyFtrlTensorFlow.GenOps.Core
resourceApplyFtrl'TensorFlow.GenOps.Core
resourceApplyFtrlV2TensorFlow.GenOps.Core
resourceApplyFtrlV2'TensorFlow.GenOps.Core
resourceApplyGradientDescentTensorFlow.GenOps.Core
resourceApplyGradientDescent'TensorFlow.GenOps.Core
resourceApplyMomentumTensorFlow.GenOps.Core
resourceApplyMomentum'TensorFlow.GenOps.Core
resourceApplyProximalAdagradTensorFlow.GenOps.Core
resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceApplyRMSPropTensorFlow.GenOps.Core
resourceApplyRMSProp'TensorFlow.GenOps.Core
resourceGatherTensorFlow.GenOps.Core
resourceGather'TensorFlow.GenOps.Core
ResourceHandleTensorFlow.Types, TensorFlow.Core
ResourceHandleProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
2 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
resourceScatterAddTensorFlow.GenOps.Core
resourceScatterAdd'TensorFlow.GenOps.Core
resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
resourceSparseApplyAdagradTensorFlow.GenOps.Core
resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyAdagradDATensorFlow.GenOps.Core
resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceSparseApplyFtrlTensorFlow.GenOps.Core
resourceSparseApplyFtrl'TensorFlow.GenOps.Core
resourceSparseApplyFtrlV2TensorFlow.GenOps.Core
resourceSparseApplyFtrlV2'TensorFlow.GenOps.Core
resourceSparseApplyMomentumTensorFlow.GenOps.Core
resourceSparseApplyMomentum'TensorFlow.GenOps.Core
resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceSparseApplyRMSPropTensorFlow.GenOps.Core
resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
resourceStridedSliceAssignTensorFlow.GenOps.Core
resourceStridedSliceAssign'TensorFlow.GenOps.Core
RESOURCE_EXHAUSTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restore'TensorFlow.GenOps.Core
restoreFromNameTensorFlow.Ops
restoreOpNameProto.Tensorflow.Core.Protobuf.Saver
restoreSliceTensorFlow.GenOps.Core
restoreSlice'TensorFlow.GenOps.Core
restoreV2TensorFlow.GenOps.Core
restoreV2'TensorFlow.GenOps.Core
retProto.Tensorflow.Core.Framework.Function
reverseTensorFlow.GenOps.Core
reverse'TensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseSequence'TensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
reverseV2'TensorFlow.GenOps.Core
rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
RewriterConfig 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.RewriterConfig
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.RewriterConfig
RewriterConfig'HEURISTICSProto.Tensorflow.Core.Protobuf.RewriterConfig
RewriterConfig'MANUALProto.Tensorflow.Core.Protobuf.RewriterConfig
RewriterConfig'MemOptTypeProto.Tensorflow.Core.Protobuf.RewriterConfig
RewriterConfig'NO_MEM_OPTProto.Tensorflow.Core.Protobuf.RewriterConfig
rFFTTensorFlow.GenOps.Core
rFFT'TensorFlow.GenOps.Core
rFFT2DTensorFlow.GenOps.Core
rFFT2D'TensorFlow.GenOps.Core
rFFT3DTensorFlow.GenOps.Core
rFFT3D'TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rGBToHSV'TensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
rint'TensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
round'TensorFlow.GenOps.Core
RPCOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
rpcOptionsProto.Tensorflow.Core.Protobuf.Config
rsqrtTensorFlow.GenOps.Core
rsqrt'TensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
rsqrtGrad'TensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session, TensorFlow.Core
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
runConfigurationProto.Tensorflow.Core.Util.TestLog
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
runMetadataProto.Tensorflow.Core.Util.Event
runModeProto.Tensorflow.Core.Util.TestLog
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runRefTensorFlow.Tensor
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runTimeProto.Tensorflow.Core.Util.TestLog
runValueTensorFlow.Tensor
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
sampleDistortedBoundingBox'TensorFlow.GenOps.Core
sampleDistortedBoundingBoxV2TensorFlow.GenOps.Core
sampleDistortedBoundingBoxV2'TensorFlow.GenOps.Core
sampleRateProto.Tensorflow.Core.Framework.Summary
save 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
save'TensorFlow.GenOps.Core
SavedModel 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.SavedModel
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.SavedModel
savedModelSchemaVersionProto.Tensorflow.Core.Protobuf.SavedModel
SavedSlice 
1 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
SavedSliceMeta 
1 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
SavedTensorSliceMeta 
1 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
SavedTensorSlices 
1 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
SaverDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Saver
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Saver
saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
SaverDef'CheckpointFormatVersionProto.Tensorflow.Core.Protobuf.Saver
SaverDef'LEGACYProto.Tensorflow.Core.Protobuf.Saver
SaverDef'V1Proto.Tensorflow.Core.Protobuf.Saver
SaverDef'V2Proto.Tensorflow.Core.Protobuf.Saver
SaveSliceInfoDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Variable
2 (Type/Class)Proto.Tensorflow.Core.Framework.Variable
saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
saveSlicesTensorFlow.GenOps.Core
saveSlices'TensorFlow.GenOps.Core
saveTensorNameProto.Tensorflow.Core.Protobuf.Saver
saveV2TensorFlow.GenOps.Core
saveV2'TensorFlow.GenOps.Core
Scalar 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
scalarTensorFlow.Ops
scalar'TensorFlow.Ops
scalarizeTensorFlow.Ops
scalarSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
scalarSummary'TensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterAdd'TensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterDiv'TensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterMul'TensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNd'TensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdAdd'TensorFlow.GenOps.Core
scatterNdNonAliasingAddTensorFlow.GenOps.Core
scatterNdNonAliasingAdd'TensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdSub'TensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterNdUpdate'TensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterSub'TensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scatterUpdate'TensorFlow.GenOps.Core
scheduledMicrosProto.Tensorflow.Core.Framework.StepStats
scomplexValProto.Tensorflow.Core.Framework.Tensor
sdcaFprintTensorFlow.GenOps.Core
sdcaFprint'TensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaOptimizer'TensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
sdcaShrinkL1'TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMax'TensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMean'TensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentMin'TensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentProd'TensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
segmentSum'TensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
select'TensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEig'TensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
selfAdjointEigV2'TensorFlow.GenOps.Core
SequenceExample 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Example
2 (Type/Class)Proto.Tensorflow.Core.Example.Example
serialIdentifierProto.Tensorflow.Core.Util.TestLog
serializeManySparseTensorFlow.GenOps.Core
serializeManySparse'TensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
serializeSparse'TensorFlow.GenOps.Core
ServerDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorflowServer
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorflowServer
Session 
1 (Type/Class)TensorFlow.Session, TensorFlow.Core
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
SessionLog 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
sessionLogProto.Tensorflow.Core.Util.Event
SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
SessionLog'STARTProto.Tensorflow.Core.Util.Event
SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
SessionLog'STOPProto.Tensorflow.Core.Util.Event
SessionTTensorFlow.Session
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
setSizeTensorFlow.GenOps.Core
setSize'TensorFlow.GenOps.Core
Shape 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
4 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
5 (Function)Proto.Tensorflow.Core.Framework.AttrValue
6 (Function)Proto.Tensorflow.Core.Framework.CostGraph
7 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
8 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
shape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
shapeNTensorFlow.GenOps.Core
shapeN'TensorFlow.GenOps.Core
shapesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
shardedProto.Tensorflow.Core.Protobuf.Saver
shardedFilenameTensorFlow.GenOps.Core
shardedFilename'TensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
shardedFilespec'TensorFlow.GenOps.Core
shardIdProto.Tensorflow.Core.Protobuf.TensorBundle
shuffleDatasetTensorFlow.GenOps.Core
shuffleDataset'TensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoid'TensorFlow.GenOps.Core
sigmoidCrossEntropyWithLogitsTensorFlow.NN
sigmoidGradTensorFlow.GenOps.Core
sigmoidGrad'TensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
signatureProto.Tensorflow.Core.Framework.Function
SignatureDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
signatureDefProto.Tensorflow.Core.Protobuf.MetaGraph
SignatureDef'InputsEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
SignatureDef'OutputsEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
SimpleArgTensorFlow.OpGen.ParsedOp
simpleValueProto.Tensorflow.Core.Framework.Summary
sinTensorFlow.GenOps.Core
sin'TensorFlow.GenOps.Core
sinhTensorFlow.GenOps.Core
sinh'TensorFlow.GenOps.Core
sinkTFRecordsTensorFlow.Records.Conduit
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
4 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
5 (Function)Proto.Tensorflow.Core.Framework.TensorShape
size' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
skipDatasetTensorFlow.GenOps.Core
skipDataset'TensorFlow.GenOps.Core
skipgramTensorFlow.GenOps.Core
skipgram'TensorFlow.GenOps.Core
slice 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
slice'TensorFlow.GenOps.Core
slicesProto.Tensorflow.Core.Protobuf.TensorBundle
slotProto.Tensorflow.Core.Framework.StepStats
snapshotProto.Tensorflow.Core.Util.TestLog
snapshotNameProto.Tensorflow.Core.Framework.Variable
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplus'TensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softplusGrad'TensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsign'TensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
softsignGrad'TensorFlow.GenOps.Core
sourceTFRecordsTensorFlow.Records.Conduit
spaceToBatchTensorFlow.GenOps.Core
spaceToBatch'TensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToBatchND'TensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
spaceToDepth'TensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAdd'TensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseAddGrad'TensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdadelta'TensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagrad'TensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyAdagradDA'TensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyFtrl'TensorFlow.GenOps.Core
sparseApplyFtrlV2TensorFlow.GenOps.Core
sparseApplyFtrlV2'TensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyMomentum'TensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseApplyRMSProp'TensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseConcat'TensorFlow.GenOps.Core
sparseConditionalAccumulatorTensorFlow.GenOps.Core
sparseConditionalAccumulator'TensorFlow.GenOps.Core
sparseCrossTensorFlow.GenOps.Core
sparseCross'TensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseAdd'TensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseDiv'TensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseDenseCwiseMul'TensorFlow.GenOps.Core
sparseFillEmptyRowsTensorFlow.GenOps.Core
sparseFillEmptyRows'TensorFlow.GenOps.Core
sparseFillEmptyRowsGradTensorFlow.GenOps.Core
sparseFillEmptyRowsGrad'TensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseMatMul'TensorFlow.GenOps.Core
sparseReduceMaxTensorFlow.GenOps.Core
sparseReduceMax'TensorFlow.GenOps.Core
sparseReduceMaxSparseTensorFlow.GenOps.Core
sparseReduceMaxSparse'TensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSum'TensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReduceSumSparse'TensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReorder'TensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseReshape'TensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMean'TensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentMeanGrad'TensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtN'TensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSegmentSum'TensorFlow.GenOps.Core
sparseSliceTensorFlow.GenOps.Core
sparseSlice'TensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmax'TensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMaximum'TensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSparseMinimum'TensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseSplit'TensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseAdd'TensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseTensorDenseMatMul'TensorFlow.GenOps.Core
sparseTensorSliceDatasetTensorFlow.GenOps.Core
sparseTensorSliceDataset'TensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToDense' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToSparseSetOperationTensorFlow.GenOps.Core
sparseToSparseSetOperation'TensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
split'TensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
splitV'TensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrt'TensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
sqrtGrad'TensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
square'TensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squaredDifference'TensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
squeeze'TensorFlow.GenOps.Core
stackTensorFlow.GenOps.Core
stack'TensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackClose'TensorFlow.GenOps.Core
stackCloseV2TensorFlow.GenOps.Core
stackCloseV2'TensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPop'TensorFlow.GenOps.Core
stackPopV2TensorFlow.GenOps.Core
stackPopV2'TensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stackPush'TensorFlow.GenOps.Core
stackPushV2TensorFlow.GenOps.Core
stackPushV2'TensorFlow.GenOps.Core
stackV2TensorFlow.GenOps.Core
stackV2'TensorFlow.GenOps.Core
stageTensorFlow.GenOps.Core
stage'TensorFlow.GenOps.Core
stageClearTensorFlow.GenOps.Core
stageClear'TensorFlow.GenOps.Core
stagePeekTensorFlow.GenOps.Core
stagePeek'TensorFlow.GenOps.Core
stageSizeTensorFlow.GenOps.Core
stageSize'TensorFlow.GenOps.Core
startProto.Tensorflow.Core.Framework.TensorSlice
startTimeProto.Tensorflow.Core.Util.TestLog
statelessRandomNormalTensorFlow.GenOps.Core
statelessRandomNormal'TensorFlow.GenOps.Core
statelessRandomUniformTensorFlow.GenOps.Core
statelessRandomUniform'TensorFlow.GenOps.Core
statelessTruncatedNormalTensorFlow.GenOps.Core
statelessTruncatedNormal'TensorFlow.GenOps.Core
statusProto.Tensorflow.Core.Util.Event
stepProto.Tensorflow.Core.Util.Event
stepIdProto.Tensorflow.Core.Framework.LogMemory
StepStats 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stopGradient'TensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSlice'TensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceAssign'TensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stridedSliceGrad'TensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringJoin'TensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringSplit'TensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucket'TensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketFast'TensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToHashBucketStrong'TensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringToNumber'TensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
stringValueProto.Tensorflow.Core.Util.TestLog
strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sub' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
substrTensorFlow.GenOps.Core
substr'TensorFlow.GenOps.Core
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.Summary
sum' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summariesTensorFlow.Build
Summary 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
summary 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Util.Event
Summary'Audio 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Image 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Value 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Value'AudioProto.Tensorflow.Core.Framework.Summary
Summary'Value'HistoProto.Tensorflow.Core.Framework.Summary
Summary'Value'ImageProto.Tensorflow.Core.Framework.Summary
Summary'Value'ObsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
Summary'Value'SimpleValueProto.Tensorflow.Core.Framework.Summary
Summary'Value'TensorProto.Tensorflow.Core.Framework.Summary
Summary'Value'ValueProto.Tensorflow.Core.Framework.Summary
SummaryDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
summaryDescriptionProto.Tensorflow.Core.Framework.Summary
SummaryMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryMetadata'PluginData 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryTensor 
1 (Type/Class)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Logging
sumSquaresProto.Tensorflow.Core.Framework.Summary
svdTensorFlow.GenOps.Core
svd'TensorFlow.GenOps.Core
swapMemoryProto.Tensorflow.Core.Protobuf.ControlFlow
switchTensorFlow.GenOps.Core
switch'TensorFlow.GenOps.Core
systemProto.Tensorflow.Core.Util.TestLog
tag 
1 (Function)Proto.Tensorflow.Core.Util.Event
2 (Function)Proto.Tensorflow.Core.Framework.Summary
TaggedRunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
taggedRunMetadataProto.Tensorflow.Core.Util.Event
tagsProto.Tensorflow.Core.Protobuf.MetaGraph
takeDatasetTensorFlow.GenOps.Core
takeDataset'TensorFlow.GenOps.Core
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tan'TensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanh'TensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
tanhGrad'TensorFlow.GenOps.Core
targetProto.Tensorflow.Core.Util.TestLog
taskIndexProto.Tensorflow.Core.Protobuf.TensorflowServer
tasksProto.Tensorflow.Core.Protobuf.Cluster
temporaryMemorySizeProto.Tensorflow.Core.Framework.CostGraph
temporaryVariableTensorFlow.GenOps.Core
temporaryVariable'TensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
tensor 
1 (Function)Proto.Tensorflow.Core.Framework.LogMemory
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
3 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
4 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
5 (Function)Proto.Tensorflow.Core.Framework.Summary
tensorArrayTensorFlow.GenOps.Core
tensorArray'TensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayClose'TensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayCloseV2'TensorFlow.GenOps.Core
tensorArrayCloseV3TensorFlow.GenOps.Core
tensorArrayCloseV3'TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcat'TensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayConcatV2'TensorFlow.GenOps.Core
tensorArrayConcatV3TensorFlow.GenOps.Core
tensorArrayConcatV3'TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGather'TensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGatherV2'TensorFlow.GenOps.Core
tensorArrayGatherV3TensorFlow.GenOps.Core
tensorArrayGatherV3'TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGrad'TensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayGradV2'TensorFlow.GenOps.Core
tensorArrayGradV3TensorFlow.GenOps.Core
tensorArrayGradV3'TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayPack'TensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayRead'TensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayReadV2'TensorFlow.GenOps.Core
tensorArrayReadV3TensorFlow.GenOps.Core
tensorArrayReadV3'TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatter'TensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArrayScatterV2'TensorFlow.GenOps.Core
tensorArrayScatterV3TensorFlow.GenOps.Core
tensorArrayScatterV3'TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySize'TensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySizeV2'TensorFlow.GenOps.Core
tensorArraySizeV3TensorFlow.GenOps.Core
tensorArraySizeV3'TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplit'TensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArraySplitV2'TensorFlow.GenOps.Core
tensorArraySplitV3TensorFlow.GenOps.Core
tensorArraySplitV3'TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayUnpack'TensorFlow.GenOps.Core
tensorArrayV2TensorFlow.GenOps.Core
tensorArrayV2'TensorFlow.GenOps.Core
tensorArrayV3TensorFlow.GenOps.Core
tensorArrayV3'TensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWrite'TensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorArrayWriteV2'TensorFlow.GenOps.Core
tensorArrayWriteV3TensorFlow.GenOps.Core
tensorArrayWriteV3'TensorFlow.GenOps.Core
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
tensorDatasetTensorFlow.GenOps.Core
tensorDataset'TensorFlow.GenOps.Core
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorDescription
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorDescription
tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorflowGitVersionProto.Tensorflow.Core.Protobuf.MetaGraph
tensorflowVersionProto.Tensorflow.Core.Protobuf.MetaGraph
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
TensorInfo'CooSparse 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
TensorInfo'CooSparse'Proto.Tensorflow.Core.Protobuf.MetaGraph
TensorInfo'EncodingProto.Tensorflow.Core.Protobuf.MetaGraph
TensorInfo'NameProto.Tensorflow.Core.Protobuf.MetaGraph
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
tensorShape 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSliceDatasetTensorFlow.GenOps.Core
tensorSliceDataset'TensorFlow.GenOps.Core
TensorSliceProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorSlice
TensorSliceProto'Extent 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorSlice
TensorSliceProto'Extent'HasLengthProto.Tensorflow.Core.Framework.TensorSlice
TensorSliceProto'Extent'LengthProto.Tensorflow.Core.Framework.TensorSlice
tensorSummaryTensorFlow.GenOps.Core
tensorSummary'TensorFlow.GenOps.Core
tensorSummaryV2TensorFlow.GenOps.Core
tensorSummaryV2'TensorFlow.GenOps.Core
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
TestResults 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
TestResults'ANDROID_BENCHMARKProto.Tensorflow.Core.Util.TestLog
TestResults'BenchmarkTypeProto.Tensorflow.Core.Util.TestLog
TestResults'CPP_MICROBENCHMARKProto.Tensorflow.Core.Util.TestLog
TestResults'PYTHON_BENCHMARKProto.Tensorflow.Core.Util.TestLog
TestResults'UNKNOWNProto.Tensorflow.Core.Util.TestLog
textLineDatasetTensorFlow.GenOps.Core
textLineDataset'TensorFlow.GenOps.Core
textLineReaderTensorFlow.GenOps.Core
textLineReader'TensorFlow.GenOps.Core
textLineReaderV2TensorFlow.GenOps.Core
textLineReaderV2'TensorFlow.GenOps.Core
TFName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
tfNameTensorFlow.OpGen.ParsedOp
tFRecordDatasetTensorFlow.GenOps.Core
tFRecordDataset'TensorFlow.GenOps.Core
tFRecordReaderTensorFlow.GenOps.Core
tFRecordReader'TensorFlow.GenOps.Core
tFRecordReaderV2TensorFlow.GenOps.Core
tFRecordReaderV2'TensorFlow.GenOps.Core
threadIdProto.Tensorflow.Core.Framework.StepStats
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
throughputProto.Tensorflow.Core.Util.TestLog
tileTensorFlow.GenOps.Core
tile'TensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
tileGrad'TensorFlow.GenOps.Core
timelineLabelProto.Tensorflow.Core.Framework.StepStats
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
toBuildTensorFlow.Tensor
tolerateDebugOpCreationFailuresProto.Tensorflow.Core.Protobuf.Debug
topKTensorFlow.GenOps.Core
topK'TensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
topKV2'TensorFlow.GenOps.Core
totalProto.Tensorflow.Core.Util.TestLog
totalBytesProto.Tensorflow.Core.Framework.StepStats
ToTensorTensorFlow.Tensor
toTensorTensorFlow.Tensor
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
transpose' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateDivTensorFlow.GenOps.Core
truncateDiv'TensorFlow.GenOps.Core
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncatedNormal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateModTensorFlow.GenOps.Core
truncateMod'TensorFlow.GenOps.Core
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
3 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
4 (Function)Proto.Tensorflow.Core.Util.TestLog
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeHintProto.Tensorflow.Core.Framework.Summary
typeListAttrProto.Tensorflow.Core.Framework.OpDef
TypeParam 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
typeParamIsListTensorFlow.OpGen.ParsedOp
typeParamRestrictionsTensorFlow.OpGen.ParsedOp
UNAUTHENTICATEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
UNAVAILABLEProto.Tensorflow.Core.Lib.Core.ErrorCodes
unControlNodeTensorFlow.Output, TensorFlow.Build
unHaskellNameTensorFlow.OpGen.ParsedOp
uniformCandidateSamplerTensorFlow.GenOps.Core
uniformCandidateSampler'TensorFlow.GenOps.Core
UNIMPLEMENTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
UniqueTensorFlow.Build
uniqueTensorFlow.GenOps.Core
unique'TensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
uniqueWithCounts'TensorFlow.GenOps.Core
UNKNOWNProto.Tensorflow.Core.Lib.Core.ErrorCodes
unknownRankProto.Tensorflow.Core.Framework.TensorShape
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unpackTensorFlow.GenOps.Core
unpack'TensorFlow.GenOps.Core
unScalarTensorFlow.Types, TensorFlow.Core
unsortedSegmentMaxTensorFlow.GenOps.Core
unsortedSegmentMax'TensorFlow.GenOps.Core
unsortedSegmentSumTensorFlow.GenOps.Core
unsortedSegmentSum'TensorFlow.GenOps.Core
unstageTensorFlow.GenOps.Core
unstage'TensorFlow.GenOps.Core
unTensorDataTensorFlow.Types
unTFNameTensorFlow.OpGen.ParsedOp
usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
uuidProto.Tensorflow.Core.Util.TestLog
Value 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
value 
1 (Function)TensorFlow.Tensor, TensorFlow.Core
2 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
3 (Function)Proto.Tensorflow.Core.Example.Feature
4 (Function)Proto.Tensorflow.Core.Protobuf.Config
5 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
6 (Function)Proto.Tensorflow.Core.Framework.Function
7 (Function)Proto.Tensorflow.Core.Framework.NodeDef
8 (Function)Proto.Tensorflow.Core.Framework.AttrValue
9 (Function)Proto.Tensorflow.Core.Framework.Summary
10 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
11 (Function)Proto.Tensorflow.Core.Protobuf.ControlFlow
12 (Function)Proto.Tensorflow.Core.Util.TestLog
valuesProto.Tensorflow.Core.Protobuf.ControlFlow
ValuesDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
ValuesDef'ExternalValuesEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
valuesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
varHandleOpTensorFlow.GenOps.Core
varHandleOp'TensorFlow.GenOps.Core
VariableTensorFlow.Variable
variable 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
3 (Function)TensorFlow.Ops
variable' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
3 (Function)TensorFlow.Ops
VariableDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Variable
2 (Type/Class)Proto.Tensorflow.Core.Framework.Variable
variableNameProto.Tensorflow.Core.Framework.Variable
variableV2TensorFlow.GenOps.Core
variableV2'TensorFlow.GenOps.Core
varIsInitializedOpTensorFlow.GenOps.Core
varIsInitializedOp'TensorFlow.GenOps.Core
varLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
VarLenFeatureProto 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
varOffsetProto.Tensorflow.Core.Framework.Variable
varShapeProto.Tensorflow.Core.Framework.Variable
vectorTensorFlow.Ops
vector'TensorFlow.Ops
version 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
3 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
4 (Function)Proto.Tensorflow.Core.Protobuf.Saver
5 (Function)Proto.Tensorflow.Core.Util.TestLog
VersionDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Versions
2 (Type/Class)Proto.Tensorflow.Core.Framework.Versions
versionNumberProto.Tensorflow.Core.Framework.Tensor
versions 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
wallTime 
1 (Function)Proto.Tensorflow.Core.Util.Event
2 (Function)Proto.Tensorflow.Core.Util.TestLog
where'TensorFlow.GenOps.Core
where''TensorFlow.GenOps.Core
WhileContextDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
wholeFileReaderTensorFlow.GenOps.Core
wholeFileReader'TensorFlow.GenOps.Core
wholeFileReaderV2TensorFlow.GenOps.Core
wholeFileReaderV2'TensorFlow.GenOps.Core
widthProto.Tensorflow.Core.Framework.Summary
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withEventWriterTensorFlow.Logging
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
writeFileTensorFlow.GenOps.Core
writeFile'TensorFlow.GenOps.Core
wtsCkptTensorFlow.Examples.MNIST.TrainedGraph
zeroInitializedVariable 
1 (Function)TensorFlow.Variable
2 (Function)TensorFlow.Ops
zeroInitializedVariable' 
1 (Function)TensorFlow.Variable
2 (Function)TensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zerosLike' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
zeta'TensorFlow.GenOps.Core
zipDatasetTensorFlow.GenOps.Core
zipDataset'TensorFlow.GenOps.Core
\\TensorFlow.Types
_AllocationDescription'allocatedBytesProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'allocationIdProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'allocatorNameProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'hasSingleReferenceProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'ptrProto.Tensorflow.Core.Framework.AllocationDescription
_AllocationDescription'requestedBytesProto.Tensorflow.Core.Framework.AllocationDescription
_AllocatorMemoryUsed'allocatorBytesInUseProto.Tensorflow.Core.Framework.StepStats
_AllocatorMemoryUsed'allocatorNameProto.Tensorflow.Core.Framework.StepStats
_AllocatorMemoryUsed'liveBytesProto.Tensorflow.Core.Framework.StepStats
_AllocatorMemoryUsed'peakBytesProto.Tensorflow.Core.Framework.StepStats
_AllocatorMemoryUsed'totalBytesProto.Tensorflow.Core.Framework.StepStats
_ArgTensorFlow.GenOps.Core
_Arg'TensorFlow.GenOps.Core
_ArrayToListTensorFlow.GenOps.Core
_ArrayToList'TensorFlow.GenOps.Core
_AssetFileDef'filenameProto.Tensorflow.Core.Protobuf.MetaGraph
_AssetFileDef'tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'valueProto.Tensorflow.Core.Framework.AttrValue
_AutoParallelOptions'enableProto.Tensorflow.Core.Protobuf.RewriterConfig
_AutoParallelOptions'numReplicasProto.Tensorflow.Core.Protobuf.RewriterConfig
_AvailableDeviceInfo'memoryLimitProto.Tensorflow.Core.Util.TestLog
_AvailableDeviceInfo'nameProto.Tensorflow.Core.Util.TestLog
_AvailableDeviceInfo'physicalDescriptionProto.Tensorflow.Core.Util.TestLog
_AvailableDeviceInfo'type'Proto.Tensorflow.Core.Util.TestLog
_BenchmarkEntries'entryProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'cpuTimeProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'extrasProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'ExtrasEntry'keyProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'ExtrasEntry'valueProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'itersProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'nameProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'throughputProto.Tensorflow.Core.Util.TestLog
_BenchmarkEntry'wallTimeProto.Tensorflow.Core.Util.TestLog
_BuildConfiguration'ccFlagsProto.Tensorflow.Core.Util.TestLog
_BuildConfiguration'modeProto.Tensorflow.Core.Util.TestLog
_BuildConfiguration'optsProto.Tensorflow.Core.Util.TestLog
_BundleEntryProto'crc32cProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'dtypeProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'offsetProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'shapeProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'shardIdProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'sizeProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleEntryProto'slicesProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleHeaderProto'endiannessProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleHeaderProto'numShardsProto.Tensorflow.Core.Protobuf.TensorBundle
_BundleHeaderProto'versionProto.Tensorflow.Core.Protobuf.TensorBundle
_BytesList'valueProto.Tensorflow.Core.Example.Feature
_ClusterDef'jobProto.Tensorflow.Core.Protobuf.Cluster
_CollectionDef'AnyList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'BytesList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'FloatList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'Int64List'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'kindProto.Tensorflow.Core.Protobuf.MetaGraph
_CollectionDef'NodeList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_CommitId'kindProto.Tensorflow.Core.Util.TestLog
_CommitId'snapshotProto.Tensorflow.Core.Util.TestLog
_CondContextDef'branchProto.Tensorflow.Core.Protobuf.ControlFlow
_CondContextDef'contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
_CondContextDef'pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
_CondContextDef'predNameProto.Tensorflow.Core.Protobuf.ControlFlow
_CondContextDef'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'clusterDefProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_CostGraphDef'nodeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'computeCostProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'computeTimeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'controlInputProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'deviceProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'devicePersistentMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'deviceTempMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'hostPersistentMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'hostTempMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'idProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'inputInfoProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'InputInfo'precedingNodeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'InputInfo'precedingPortProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'isFinalProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'memoryTimeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'nameProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'outputInfoProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'OutputInfo'aliasInputPortProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'OutputInfo'dtypeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'OutputInfo'shapeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'OutputInfo'sizeProto.Tensorflow.Core.Framework.CostGraph
_CostGraphDef'Node'temporaryMemorySizeProto.Tensorflow.Core.Framework.CostGraph
_CPUInfo'cacheSizeProto.Tensorflow.Core.Util.TestLog
_CPUInfo'CacheSizeEntry'keyProto.Tensorflow.Core.Util.TestLog
_CPUInfo'CacheSizeEntry'valueProto.Tensorflow.Core.Util.TestLog
_CPUInfo'cpuGovernorProto.Tensorflow.Core.Util.TestLog
_CPUInfo'cpuInfoProto.Tensorflow.Core.Util.TestLog
_CPUInfo'mhzPerCpuProto.Tensorflow.Core.Util.TestLog
_CPUInfo'numCoresProto.Tensorflow.Core.Util.TestLog
_CPUInfo'numCoresAllowedProto.Tensorflow.Core.Util.TestLog
_DebugOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Debug
_DebugOptions'globalStepProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Debug
_DebugTensorWatch'tolerateDebugOpCreationFailuresProto.Tensorflow.Core.Protobuf.Debug
_DeviceAttributes'deviceTypeProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'incarnationProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'localityProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'memoryLimitProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'nameProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceAttributes'physicalDeviceDescProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceLocality'busIdProto.Tensorflow.Core.Framework.DeviceAttributes
_DeviceStepStats'deviceProto.Tensorflow.Core.Framework.StepStats
_DeviceStepStats'nodeStatsProto.Tensorflow.Core.Framework.StepStats
_EntryValue'kindProto.Tensorflow.Core.Util.TestLog
_Event'stepProto.Tensorflow.Core.Util.Event
_Event'wallTimeProto.Tensorflow.Core.Util.Event
_Event'whatProto.Tensorflow.Core.Util.Event
_Example'featuresProto.Tensorflow.Core.Example.Example
_ExampleParserConfiguration'featureMapProto.Tensorflow.Core.Example.ExampleParserConfiguration
_ExampleParserConfiguration'FeatureMapEntry'keyProto.Tensorflow.Core.Example.ExampleParserConfiguration
_ExampleParserConfiguration'FeatureMapEntry'valueProto.Tensorflow.Core.Example.ExampleParserConfiguration
_Feature'kindProto.Tensorflow.Core.Example.Feature
_FeatureConfiguration'configProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FeatureList'featureProto.Tensorflow.Core.Example.Feature
_FeatureLists'featureListProto.Tensorflow.Core.Example.Feature
_FeatureLists'FeatureListEntry'keyProto.Tensorflow.Core.Example.Feature
_FeatureLists'FeatureListEntry'valueProto.Tensorflow.Core.Example.Feature
_Features'featureProto.Tensorflow.Core.Example.Feature
_Features'FeatureEntry'keyProto.Tensorflow.Core.Example.Feature
_Features'FeatureEntry'valueProto.Tensorflow.Core.Example.Feature
_FixedLenFeatureProto'defaultValueProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FixedLenFeatureProto'dtypeProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FixedLenFeatureProto'shapeProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FixedLenFeatureProto'valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
_FloatList'valueProto.Tensorflow.Core.Example.Feature
_FunctionDef'attrProto.Tensorflow.Core.Framework.Function
_FunctionDef'AttrEntry'keyProto.Tensorflow.Core.Framework.Function
_FunctionDef'AttrEntry'valueProto.Tensorflow.Core.Framework.Function
_FunctionDef'nodeDefProto.Tensorflow.Core.Framework.Function
_FunctionDef'retProto.Tensorflow.Core.Framework.Function
_FunctionDef'RetEntry'keyProto.Tensorflow.Core.Framework.Function
_FunctionDef'RetEntry'valueProto.Tensorflow.Core.Framework.Function
_FunctionDef'signatureProto.Tensorflow.Core.Framework.Function
_FunctionDefLibrary'functionProto.Tensorflow.Core.Framework.Function
_FunctionDefLibrary'gradientProto.Tensorflow.Core.Framework.Function
_GPUInfo'busIdProto.Tensorflow.Core.Util.TestLog
_GPUInfo'modelProto.Tensorflow.Core.Util.TestLog
_GPUInfo'uuidProto.Tensorflow.Core.Util.TestLog
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'forceGpuCompatibleProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'pollingActiveDelayUsecsProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'pollingInactiveDelayMsecsProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GradientDef'functionNameProto.Tensorflow.Core.Framework.Function
_GradientDef'gradientFuncProto.Tensorflow.Core.Framework.Function
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
_HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
_HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
_HistogramProto'minProto.Tensorflow.Core.Framework.Summary
_HistogramProto'numProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
_HostCastTensorFlow.GenOps.Core
_HostCast'TensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostRecv'TensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_HostSend'TensorFlow.GenOps.Core
_Int64List'valueProto.Tensorflow.Core.Example.Feature
_JobDef'nameProto.Tensorflow.Core.Protobuf.Cluster
_JobDef'tasksProto.Tensorflow.Core.Protobuf.Cluster
_JobDef'TasksEntry'keyProto.Tensorflow.Core.Protobuf.Cluster
_JobDef'TasksEntry'valueProto.Tensorflow.Core.Protobuf.Cluster
_KernelDef'AttrConstraint'allowedValuesProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'AttrConstraint'nameProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'constraintProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'deviceTypeProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'hostMemoryArgProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'labelProto.Tensorflow.Core.Framework.KernelDef
_KernelDef'opProto.Tensorflow.Core.Framework.KernelDef
_ListToArrayTensorFlow.GenOps.Core
_ListToArray'TensorFlow.GenOps.Core
_LogMessage'levelProto.Tensorflow.Core.Util.Event
_LogMessage'messageProto.Tensorflow.Core.Util.Event
_MachineConfiguration'availableDeviceInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'cpuInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'deviceInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'hostnameProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'memoryInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'platformInfoProto.Tensorflow.Core.Util.TestLog
_MachineConfiguration'serialIdentifierProto.Tensorflow.Core.Util.TestLog
_MemmappedFileSystemDirectory'elementProto.Tensorflow.Core.Util.MemmappedFileSystem
_MemmappedFileSystemDirectoryElement'nameProto.Tensorflow.Core.Util.MemmappedFileSystem
_MemmappedFileSystemDirectoryElement'offsetProto.Tensorflow.Core.Util.MemmappedFileSystem
_MemoryInfo'availableProto.Tensorflow.Core.Util.TestLog
_MemoryInfo'totalProto.Tensorflow.Core.Util.TestLog
_MemoryLogRawAllocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'numBytesProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'operationProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'ptrProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawAllocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'deferredProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'operationProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogRawDeallocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogStep'handleProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogStep'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorAllocation'kernelNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorAllocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorAllocation'tensorProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorDeallocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorDeallocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorOutput'indexProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorOutput'kernelNameProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorOutput'stepIdProto.Tensorflow.Core.Framework.LogMemory
_MemoryLogTensorOutput'tensorProto.Tensorflow.Core.Framework.LogMemory
_MemoryStats'devicePersistentMemorySizeProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'devicePersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'deviceTempMemorySizeProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'hostPersistentMemorySizeProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'hostPersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
_MemoryStats'hostTempMemorySizeProto.Tensorflow.Core.Framework.StepStats
_MetaGraphDef'assetFileDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'collectionDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'CollectionDefEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'CollectionDefEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'graphDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'metaGraphVersionProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'tagsProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'tensorflowGitVersionProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'MetaInfoDef'tensorflowVersionProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'signatureDefProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'SignatureDefEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
_MetaGraphDef'SignatureDefEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NamedTensorProto'nameProto.Tensorflow.Core.Protobuf.NamedTensor
_NamedTensorProto'tensorProto.Tensorflow.Core.Protobuf.NamedTensor
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_NodeExecStats'allEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'allStartMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'memoryProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'memoryStatsProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'nodeNameProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'opEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'opStartRelMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'outputProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'referencedTensorProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'scheduledMicrosProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'threadIdProto.Tensorflow.Core.Framework.StepStats
_NodeExecStats'timelineLabelProto.Tensorflow.Core.Framework.StepStats
_NodeOutput'slotProto.Tensorflow.Core.Framework.StepStats
_NodeOutput'tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_ParallelConcatStartTensorFlow.GenOps.Core
_ParallelConcatStart'TensorFlow.GenOps.Core
_ParallelConcatUpdateTensorFlow.GenOps.Core
_ParallelConcatUpdate'TensorFlow.GenOps.Core
_PlatformInfo'bitsProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'linkageProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'machineProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'releaseProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'systemProto.Tensorflow.Core.Util.TestLog
_PlatformInfo'versionProto.Tensorflow.Core.Util.TestLog
_QueueRunnerDef'cancelOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
_QueueRunnerDef'closeOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
_QueueRunnerDef'enqueueOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
_QueueRunnerDef'queueClosedExceptionTypesProto.Tensorflow.Core.Protobuf.QueueRunner
_QueueRunnerDef'queueNameProto.Tensorflow.Core.Protobuf.QueueRunner
_RecvTensorFlow.GenOps.Core
_Recv'TensorFlow.GenOps.Core
_ResourceHandleProto'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandleProto'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandleProto'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandleProto'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandleProto'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_Retval'TensorFlow.GenOps.Core
_RewriterConfig'autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'constantFoldingProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'disableModelPruningProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'memoryOptimizationProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'optimizersProto.Tensorflow.Core.Protobuf.RewriterConfig
_RewriterConfig'optimizeTensorLayoutProto.Tensorflow.Core.Protobuf.RewriterConfig
_RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
_RunConfiguration'argumentProto.Tensorflow.Core.Util.TestLog
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SavedModel'metaGraphsProto.Tensorflow.Core.Protobuf.SavedModel
_SavedModel'savedModelSchemaVersionProto.Tensorflow.Core.Protobuf.SavedModel
_SavedSlice'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSlice'nameProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSlice'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSliceMeta'nameProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSliceMeta'shapeProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSliceMeta'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedSliceMeta'type'Proto.Tensorflow.Core.Util.SavedTensorSlice
_SavedTensorSliceMeta'tensorProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedTensorSliceMeta'versionsProto.Tensorflow.Core.Util.SavedTensorSlice
_SavedTensorSlices'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
_SavedTensorSlices'metaProto.Tensorflow.Core.Util.SavedTensorSlice
_SaverDef'filenameTensorNameProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'keepCheckpointEveryNHoursProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'maxToKeepProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'restoreOpNameProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'saveTensorNameProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'shardedProto.Tensorflow.Core.Protobuf.Saver
_SaverDef'versionProto.Tensorflow.Core.Protobuf.Saver
_SaveSliceInfoDef'fullNameProto.Tensorflow.Core.Framework.Variable
_SaveSliceInfoDef'fullShapeProto.Tensorflow.Core.Framework.Variable
_SaveSliceInfoDef'varOffsetProto.Tensorflow.Core.Framework.Variable
_SaveSliceInfoDef'varShapeProto.Tensorflow.Core.Framework.Variable
_SendTensorFlow.GenOps.Core
_Send'TensorFlow.GenOps.Core
_SequenceExample'contextProto.Tensorflow.Core.Example.Example
_SequenceExample'featureListsProto.Tensorflow.Core.Example.Example
_ServerDef'clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
_ServerDef'defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
_ServerDef'jobNameProto.Tensorflow.Core.Protobuf.TensorflowServer
_ServerDef'protocolProto.Tensorflow.Core.Protobuf.TensorflowServer
_ServerDef'taskIndexProto.Tensorflow.Core.Protobuf.TensorflowServer
_SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
_SessionLog'msgProto.Tensorflow.Core.Util.Event
_SessionLog'statusProto.Tensorflow.Core.Util.Event
_SignatureDef'inputsProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'InputsEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'InputsEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'methodNameProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'outputsProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'OutputsEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
_SignatureDef'OutputsEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
_StepStats'devStatsProto.Tensorflow.Core.Framework.StepStats
_Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
_Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
_Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
_Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
_Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
_Summary'valueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'metadataProto.Tensorflow.Core.Framework.Summary
_Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
_Summary'Value'valueProto.Tensorflow.Core.Framework.Summary
_SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'displayNameProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'pluginDataProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'PluginData'contentProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'PluginData'pluginNameProto.Tensorflow.Core.Framework.Summary
_SummaryMetadata'summaryDescriptionProto.Tensorflow.Core.Framework.Summary
_TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
_TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
_TensorDescription'allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
_TensorDescription'dtypeProto.Tensorflow.Core.Framework.TensorDescription
_TensorDescription'shapeProto.Tensorflow.Core.Framework.TensorDescription
_TensorInfo'CooSparse'denseShapeTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'CooSparse'indicesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'CooSparse'valuesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'dtypeProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'encodingProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorInfo'tensorShapeProto.Tensorflow.Core.Protobuf.MetaGraph
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_TensorSliceProto'extentProto.Tensorflow.Core.Framework.TensorSlice
_TensorSliceProto'Extent'hasLengthProto.Tensorflow.Core.Framework.TensorSlice
_TensorSliceProto'Extent'startProto.Tensorflow.Core.Framework.TensorSlice
_TestResults'benchmarkTypeProto.Tensorflow.Core.Util.TestLog
_TestResults'buildConfigurationProto.Tensorflow.Core.Util.TestLog
_TestResults'commitIdProto.Tensorflow.Core.Util.TestLog
_TestResults'entriesProto.Tensorflow.Core.Util.TestLog
_TestResults'machineConfigurationProto.Tensorflow.Core.Util.TestLog
_TestResults'nameProto.Tensorflow.Core.Util.TestLog
_TestResults'runConfigurationProto.Tensorflow.Core.Util.TestLog
_TestResults'runModeProto.Tensorflow.Core.Util.TestLog
_TestResults'runTimeProto.Tensorflow.Core.Util.TestLog
_TestResults'startTimeProto.Tensorflow.Core.Util.TestLog
_TestResults'targetProto.Tensorflow.Core.Util.TestLog
_ThreadPoolOptionProto'globalNameProto.Tensorflow.Core.Protobuf.Config
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
_UnsafeReadVariableTensorFlow.GenOps.Core
_UnsafeReadVariable'TensorFlow.GenOps.Core
_ValuesDef'externalValuesProto.Tensorflow.Core.Protobuf.ControlFlow
_ValuesDef'ExternalValuesEntry'keyProto.Tensorflow.Core.Protobuf.ControlFlow
_ValuesDef'ExternalValuesEntry'valueProto.Tensorflow.Core.Protobuf.ControlFlow
_ValuesDef'valuesProto.Tensorflow.Core.Protobuf.ControlFlow
_VariableDef'initializerNameProto.Tensorflow.Core.Framework.Variable
_VariableDef'isResourceProto.Tensorflow.Core.Framework.Variable
_VariableDef'saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
_VariableDef'snapshotNameProto.Tensorflow.Core.Framework.Variable
_VariableDef'variableNameProto.Tensorflow.Core.Framework.Variable
_VarLenFeatureProto'dtypeProto.Tensorflow.Core.Example.ExampleParserConfiguration
_VarLenFeatureProto'indicesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
_VarLenFeatureProto'shapesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
_VarLenFeatureProto'valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
_VersionDef'badConsumersProto.Tensorflow.Core.Framework.Versions
_VersionDef'minConsumerProto.Tensorflow.Core.Framework.Versions
_VersionDef'producerProto.Tensorflow.Core.Framework.Versions
_WhileContextDef'backPropProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'loopEnterNamesProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'loopExitNamesProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'parallelIterationsProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'pivotForBodyNameProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'pivotForPredNameProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'swapMemoryProto.Tensorflow.Core.Protobuf.ControlFlow
_WhileContextDef'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
\ No newline at end of file diff --git a/docs/haddock/doc-index-B.html b/docs/haddock/doc-index-B.html index 425f2a2..2c94289 100644 --- a/docs/haddock/doc-index-B.html +++ b/docs/haddock/doc-index-B.html @@ -1,4 +1,4 @@ - (Index - B)

 

Index - B

bProto.Tensorflow.Core.Framework.AttrValue
barrierTensorFlow.GenOps.Core
barrier'TensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierClose'TensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierIncompleteSize'TensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierInsertMany'TensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
barrierReadySize'TensorFlow.GenOps.Core
barrierTakeManyTensorFlow.GenOps.Core
barrierTakeMany'TensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholesky'TensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchCholeskyGrad'TensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT'TensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT2D'TensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchFFT3D'TensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT'TensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT2D'TensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchIFFT3D'TensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatMul'TensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixBandPart'TensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDeterminant'TensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiag'TensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixDiagPart'TensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixInverse'TensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSetDiag'TensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolve'TensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixSolveLs'TensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchMatrixTriangularSolve'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEig'TensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSelfAdjointEigV2'TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchSvd'TensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpace'TensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
batchToSpaceND'TensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
betainc'TensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAdd'TensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddGrad'TensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasAddV1'TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bitcastTensorFlow.GenOps.Core
bitcast'TensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
broadcastArgsTensorFlow.GenOps.Core
broadcastArgs'TensorFlow.GenOps.Core
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
broadcastGradientArgs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
bucketProto.Tensorflow.Core.Framework.Summary
bucketLimitProto.Tensorflow.Core.Framework.Summary
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
BuildInputsTensorFlow.BuildOp
buildInputsTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildResultTensorFlow.BuildOp
buildResultTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
\ No newline at end of file +

 

Index - B

bProto.Tensorflow.Core.Framework.AttrValue
backPropProto.Tensorflow.Core.Protobuf.ControlFlow
badConsumersProto.Tensorflow.Core.Framework.Versions
barrierTensorFlow.GenOps.Core
barrier'TensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierClose'TensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierIncompleteSize'TensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierInsertMany'TensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
barrierReadySize'TensorFlow.GenOps.Core
barrierTakeManyTensorFlow.GenOps.Core
barrierTakeMany'TensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholesky'TensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchCholeskyGrad'TensorFlow.GenOps.Core
batchDatasetTensorFlow.GenOps.Core
batchDataset'TensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT'TensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT2D'TensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchFFT3D'TensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT'TensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT2D'TensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchIFFT3D'TensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatMul'TensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixBandPart'TensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDeterminant'TensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiag'TensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixDiagPart'TensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixInverse'TensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSetDiag'TensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolve'TensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixSolveLs'TensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchMatrixTriangularSolve'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEig'TensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSelfAdjointEigV2'TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchSvd'TensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpace'TensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
batchToSpaceND'TensorFlow.GenOps.Core
BenchmarkEntries 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
BenchmarkEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
BenchmarkEntry'ExtrasEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
benchmarkTypeProto.Tensorflow.Core.Util.TestLog
betaincTensorFlow.GenOps.Core
betainc'TensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAdd'TensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddGrad'TensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasAddV1'TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bincountTensorFlow.GenOps.Core
bincount'TensorFlow.GenOps.Core
bitcastTensorFlow.GenOps.Core
bitcast'TensorFlow.GenOps.Core
bitsProto.Tensorflow.Core.Util.TestLog
bitwiseAndTensorFlow.GenOps.Core
bitwiseAnd'TensorFlow.GenOps.Core
bitwiseOrTensorFlow.GenOps.Core
bitwiseOr'TensorFlow.GenOps.Core
bitwiseXorTensorFlow.GenOps.Core
bitwiseXor'TensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
branchProto.Tensorflow.Core.Protobuf.ControlFlow
broadcastArgsTensorFlow.GenOps.Core
broadcastArgs'TensorFlow.GenOps.Core
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
broadcastGradientArgs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
bucketProto.Tensorflow.Core.Framework.Summary
bucketizeTensorFlow.GenOps.Core
bucketize'TensorFlow.GenOps.Core
bucketLimitProto.Tensorflow.Core.Framework.Summary
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
BuildConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
buildConfigurationProto.Tensorflow.Core.Util.TestLog
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
BuildInputsTensorFlow.BuildOp
buildInputsTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildResultTensorFlow.BuildOp
buildResultTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
BundleEntryProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorBundle
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorBundle
BundleHeaderProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorBundle
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorBundle
BundleHeaderProto'BIGProto.Tensorflow.Core.Protobuf.TensorBundle
BundleHeaderProto'EndiannessProto.Tensorflow.Core.Protobuf.TensorBundle
BundleHeaderProto'LITTLEProto.Tensorflow.Core.Protobuf.TensorBundle
busId 
1 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
2 (Function)Proto.Tensorflow.Core.Util.TestLog
BytesList 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
bytesList 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
\ No newline at end of file diff --git a/docs/haddock/doc-index-C.html b/docs/haddock/doc-index-C.html index 7f91a62..883ad28 100644 --- a/docs/haddock/doc-index-C.html +++ b/docs/haddock/doc-index-C.html @@ -1,4 +1,4 @@ - (Index - C)

 

Index - C

camelCaseTensorFlow.OpGen.ParsedOp
cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
cast' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ceilTensorFlow.GenOps.Core
ceil'TensorFlow.GenOps.Core
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
checkNumerics'TensorFlow.GenOps.Core
checkpointPathProto.Tensorflow.Core.Util.Event
choleskyTensorFlow.GenOps.Core
cholesky'TensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
choleskyGrad'TensorFlow.GenOps.Core
collectAllSummariesTensorFlow.Tensor
colocateWithTensorFlow.Tensor, TensorFlow.Core
colorspaceProto.Tensorflow.Core.Framework.Summary
complexTensorFlow.GenOps.Core
complex'TensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
complexAbs'TensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
computeAccidentalHits'TensorFlow.GenOps.Core
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concat' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatOffsetTensorFlow.GenOps.Core
concatOffset'TensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
concatV2'TensorFlow.GenOps.Core
conditionalAccumulatorTensorFlow.GenOps.Core
conditionalAccumulator'TensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
conj'TensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
const'TensorFlow.GenOps.Core
constantTensorFlow.Ops
constant'TensorFlow.Ops
containerProto.Tensorflow.Core.Framework.ResourceHandle
contentTypeProto.Tensorflow.Core.Framework.Summary
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
controlTriggerTensorFlow.GenOps.Core
controlTrigger'TensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2D'TensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropFilter'TensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv2DBackpropInput'TensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3D'TensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilter'TensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropFilterV2'TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInput'TensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
conv3DBackpropInputV2'TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copy'TensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
copyHost'TensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
cos'TensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
countUpToTensorFlow.GenOps.Core
countUpTo'TensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResize'TensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradBoxes'TensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
cropAndResizeGradImage'TensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cross'TensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCBeamSearchDecoder'TensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCGreedyDecoder'TensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cTCLoss'TensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumprod'TensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
cumsum'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - C

cacheDatasetTensorFlow.GenOps.Core
cacheDataset'TensorFlow.GenOps.Core
cacheSizeProto.Tensorflow.Core.Util.TestLog
camelCaseTensorFlow.OpGen.ParsedOp
CANCELLEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
cancelOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
cast' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ccFlagsProto.Tensorflow.Core.Util.TestLog
ceilTensorFlow.GenOps.Core
ceil'TensorFlow.GenOps.Core
changelistProto.Tensorflow.Core.Util.TestLog
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
checkNumerics'TensorFlow.GenOps.Core
checkpointPathProto.Tensorflow.Core.Util.Event
choleskyTensorFlow.GenOps.Core
cholesky'TensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
choleskyGrad'TensorFlow.GenOps.Core
closeOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
ClusterDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Cluster
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Cluster
clusterDefProto.Tensorflow.Core.Protobuf.Config
CodeProto.Tensorflow.Core.Lib.Core.ErrorCodes
collectAllSummariesTensorFlow.Tensor
CollectionDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
collectionDefProto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'AnyList 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'AnyList'Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'BytesList 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'BytesList'Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'FloatList 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'FloatList'Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'Int64List 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'Int64List'Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'KindProto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'NodeList 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
CollectionDef'NodeList'Proto.Tensorflow.Core.Protobuf.MetaGraph
colocateWithTensorFlow.Tensor, TensorFlow.Core
colorspaceProto.Tensorflow.Core.Framework.Summary
CommitId 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
commitIdProto.Tensorflow.Core.Util.TestLog
CommitId'ChangelistProto.Tensorflow.Core.Util.TestLog
CommitId'HashProto.Tensorflow.Core.Util.TestLog
CommitId'KindProto.Tensorflow.Core.Util.TestLog
complexTensorFlow.GenOps.Core
complex'TensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
complexAbs'TensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
computeAccidentalHits'TensorFlow.GenOps.Core
computeCostProto.Tensorflow.Core.Framework.CostGraph
computeTimeProto.Tensorflow.Core.Framework.CostGraph
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concat' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatenateDatasetTensorFlow.GenOps.Core
concatenateDataset'TensorFlow.GenOps.Core
concatOffsetTensorFlow.GenOps.Core
concatOffset'TensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
concatV2'TensorFlow.GenOps.Core
CondContextDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
conditionalAccumulatorTensorFlow.GenOps.Core
conditionalAccumulator'TensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
conj'TensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
const'TensorFlow.GenOps.Core
constantTensorFlow.Ops
constant'TensorFlow.Ops
constantFoldingProto.Tensorflow.Core.Protobuf.RewriterConfig
constraintProto.Tensorflow.Core.Framework.KernelDef
containerProto.Tensorflow.Core.Framework.ResourceHandle
contentProto.Tensorflow.Core.Framework.Summary
contentTypeProto.Tensorflow.Core.Framework.Summary
contextProto.Tensorflow.Core.Example.Example
contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
controlInputProto.Tensorflow.Core.Framework.CostGraph
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
controlTriggerTensorFlow.GenOps.Core
controlTrigger'TensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2D'TensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropFilter'TensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv2DBackpropInput'TensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3D'TensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilter'TensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropFilterV2'TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInput'TensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
conv3DBackpropInputV2'TensorFlow.GenOps.Core
cooSparseProto.Tensorflow.Core.Protobuf.MetaGraph
cosTensorFlow.GenOps.Core
cos'TensorFlow.GenOps.Core
coshTensorFlow.GenOps.Core
cosh'TensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
CostGraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
2 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
CostGraphDef'Node 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
2 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
CostGraphDef'Node'InputInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
2 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
CostGraphDef'Node'OutputInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
2 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
countUpToTensorFlow.GenOps.Core
countUpTo'TensorFlow.GenOps.Core
cpuGovernorProto.Tensorflow.Core.Util.TestLog
CPUInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
cpuInfoProto.Tensorflow.Core.Util.TestLog
CPUInfo'CacheSizeEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
cpuTimeProto.Tensorflow.Core.Util.TestLog
crc32cProto.Tensorflow.Core.Protobuf.TensorBundle
cropAndResizeTensorFlow.GenOps.Core
cropAndResize'TensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradBoxes'TensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
cropAndResizeGradImage'TensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cross'TensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCBeamSearchDecoder'TensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCGreedyDecoder'TensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cTCLoss'TensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumprod'TensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
cumsum'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-D.html b/docs/haddock/doc-index-D.html index cdd4c2c..36c879d 100644 --- a/docs/haddock/doc-index-D.html +++ b/docs/haddock/doc-index-D.html @@ -1,4 +1,4 @@ - (Index - D)

 

Index - D

DataType 
1 (Type/Class)TensorFlow.Types
2 (Type/Class)Proto.Tensorflow.Core.Framework.Types
dcomplexValProto.Tensorflow.Core.Framework.Tensor
debugIdentityTensorFlow.GenOps.Core
debugIdentity'TensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugNanCount'TensorFlow.GenOps.Core
debugNumericSummaryTensorFlow.GenOps.Core
debugNumericSummary'TensorFlow.GenOps.Core
debugOptionsProto.Tensorflow.Core.Protobuf.Config
decodeBase64TensorFlow.GenOps.Core
decodeBase64'TensorFlow.GenOps.Core
decodeCSVTensorFlow.GenOps.Core
decodeCSV'TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeGif'TensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJpeg'TensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodeJSONExample'TensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodePng'TensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeRaw'TensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types, TensorFlow.Core
decodeTFRecordsTensorFlow.Records.Conduit
defaultValueProto.Tensorflow.Core.Framework.OpDef
deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
DeleteTensorFlow.Types
deleteSessionTensorTensorFlow.GenOps.Core
deleteSessionTensor'TensorFlow.GenOps.Core
denseToDenseSetOperationTensorFlow.GenOps.Core
denseToDenseSetOperation'TensorFlow.GenOps.Core
denseToSparseSetOperationTensorFlow.GenOps.Core
denseToSparseSetOperation'TensorFlow.GenOps.Core
deprecationProto.Tensorflow.Core.Framework.OpDef
depthToSpaceTensorFlow.GenOps.Core
depthToSpace'TensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNative'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequantize'TensorFlow.GenOps.Core
dequeueTensorFlow.Queue
descriptionProto.Tensorflow.Core.Framework.OpDef
deserializeManySparseTensorFlow.GenOps.Core
deserializeManySparse'TensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
destroyTemporaryVariable'TensorFlow.GenOps.Core
Device 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Core
2 (Type/Class)TensorFlow.Output, TensorFlow.Core
device 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
deviceCountProto.Tensorflow.Core.Protobuf.Config
deviceFiltersProto.Tensorflow.Core.Protobuf.Config
deviceNameTensorFlow.Output, TensorFlow.Core
diagTensorFlow.GenOps.Core
diag'TensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
diagPart'TensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
digamma'TensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2D'TensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropFilter'TensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dilation2DBackpropInput'TensorFlow.GenOps.Core
dimProto.Tensorflow.Core.Framework.TensorShape
divTensorFlow.GenOps.Core
div'TensorFlow.GenOps.Core
doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
docOpListTensorFlow.OpGen
doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
doubleValProto.Tensorflow.Core.Framework.Tensor
drawBoundingBoxesTensorFlow.GenOps.Core
drawBoundingBoxes'TensorFlow.GenOps.Core
drawMNISTTensorFlow.Examples.MNIST.Parse
dtypeProto.Tensorflow.Core.Framework.Tensor
DT_BFLOAT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INVALID 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
dynamicPartitionTensorFlow.GenOps.Core
dynamicPartition'TensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
dynamicStitch'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - D

data'Proto.Tensorflow.Core.Util.SavedTensorSlice
DataType 
1 (Type/Class)TensorFlow.Types
2 (Type/Class)Proto.Tensorflow.Core.Framework.Types
DATA_LOSSProto.Tensorflow.Core.Lib.Core.ErrorCodes
dcomplexValProto.Tensorflow.Core.Framework.Tensor
DEADLINE_EXCEEDEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
debugGradientIdentityTensorFlow.GenOps.Core
debugGradientIdentity'TensorFlow.GenOps.Core
debugOpsProto.Tensorflow.Core.Protobuf.Debug
DebugOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Debug
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Debug
debugOptionsProto.Tensorflow.Core.Protobuf.Config
DebugTensorWatch 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Debug
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Debug
debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Debug
debugUrlsProto.Tensorflow.Core.Protobuf.Debug
decodeBase64TensorFlow.GenOps.Core
decodeBase64'TensorFlow.GenOps.Core
decodeBmpTensorFlow.GenOps.Core
decodeBmp'TensorFlow.GenOps.Core
decodeCSVTensorFlow.GenOps.Core
decodeCSV'TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeGif'TensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJpeg'TensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodeJSONExample'TensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodePng'TensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeRaw'TensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types, TensorFlow.Core
decodeTFRecordsTensorFlow.Records.Conduit
decodeWavTensorFlow.GenOps.Core
decodeWav'TensorFlow.GenOps.Core
defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
defaultValue 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
deferredProto.Tensorflow.Core.Framework.LogMemory
deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
DeleteTensorFlow.Types
deleteSessionTensorTensorFlow.GenOps.Core
deleteSessionTensor'TensorFlow.GenOps.Core
denseShapeTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
denseToDenseSetOperationTensorFlow.GenOps.Core
denseToDenseSetOperation'TensorFlow.GenOps.Core
denseToSparseBatchDatasetTensorFlow.GenOps.Core
denseToSparseBatchDataset'TensorFlow.GenOps.Core
denseToSparseSetOperationTensorFlow.GenOps.Core
denseToSparseSetOperation'TensorFlow.GenOps.Core
deprecationProto.Tensorflow.Core.Framework.OpDef
depthToSpaceTensorFlow.GenOps.Core
depthToSpace'TensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNative'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequantize'TensorFlow.GenOps.Core
dequeueTensorFlow.Queue
descriptionProto.Tensorflow.Core.Framework.OpDef
deserializeManySparseTensorFlow.GenOps.Core
deserializeManySparse'TensorFlow.GenOps.Core
destroyResourceOpTensorFlow.GenOps.Core
destroyResourceOp'TensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
destroyTemporaryVariable'TensorFlow.GenOps.Core
Device 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Core
2 (Type/Class)TensorFlow.Output, TensorFlow.Core
device 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
4 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
DeviceAttributes 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.DeviceAttributes
2 (Type/Class)Proto.Tensorflow.Core.Framework.DeviceAttributes
deviceCountProto.Tensorflow.Core.Protobuf.Config
deviceFiltersProto.Tensorflow.Core.Protobuf.Config
deviceInfoProto.Tensorflow.Core.Util.TestLog
DeviceLocality 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.DeviceAttributes
2 (Type/Class)Proto.Tensorflow.Core.Framework.DeviceAttributes
deviceNameTensorFlow.Output, TensorFlow.Core
devicePersistentMemorySize 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
devicePersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
DeviceStepStats 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
deviceTempMemorySize 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
deviceType 
1 (Function)Proto.Tensorflow.Core.Framework.KernelDef
2 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
devStatsProto.Tensorflow.Core.Framework.StepStats
diagTensorFlow.GenOps.Core
diag'TensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
diagPart'TensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
digamma'TensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2D'TensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropFilter'TensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dilation2DBackpropInput'TensorFlow.GenOps.Core
dimProto.Tensorflow.Core.Framework.TensorShape
disableModelPruningProto.Tensorflow.Core.Protobuf.RewriterConfig
displayNameProto.Tensorflow.Core.Framework.Summary
divTensorFlow.GenOps.Core
div'TensorFlow.GenOps.Core
doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
docOpListTensorFlow.OpGen
doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
doubleValProto.Tensorflow.Core.Framework.Tensor
doubleValueProto.Tensorflow.Core.Util.TestLog
DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_Proto.Tensorflow.Core.Lib.Core.ErrorCodes
drawBoundingBoxesTensorFlow.GenOps.Core
drawBoundingBoxes'TensorFlow.GenOps.Core
drawMNISTTensorFlow.Examples.MNIST.Parse
dtype 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
3 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
4 (Function)Proto.Tensorflow.Core.Framework.CostGraph
5 (Function)Proto.Tensorflow.Core.Framework.Tensor
6 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
DT_BFLOAT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INVALID 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
dynamicPartitionTensorFlow.GenOps.Core
dynamicPartition'TensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
dynamicStitch'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-E.html b/docs/haddock/doc-index-E.html index 9433bf4..e239a32 100644 --- a/docs/haddock/doc-index-E.html +++ b/docs/haddock/doc-index-E.html @@ -1,4 +1,4 @@ - (Index - E)

 

Index - E

editDistanceTensorFlow.GenOps.Core
editDistance'TensorFlow.GenOps.Core
eluTensorFlow.GenOps.Core
elu'TensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
eluGrad'TensorFlow.GenOps.Core
embeddingLookupTensorFlow.EmbeddingOps
enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
encodeBase64TensorFlow.GenOps.Core
encodeBase64'TensorFlow.GenOps.Core
encodedAudioStringProto.Tensorflow.Core.Framework.Summary
encodedImageStringProto.Tensorflow.Core.Framework.Summary
encodeJpegTensorFlow.GenOps.Core
encodeJpeg'TensorFlow.GenOps.Core
encodeOutputTensorFlow.Build
encodePngTensorFlow.GenOps.Core
encodePng'TensorFlow.GenOps.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
encodeTFRecordsTensorFlow.Records.Conduit
enqueueTensorFlow.Queue
enterTensorFlow.GenOps.Core
enter'TensorFlow.GenOps.Core
eqLengthGuardTensorFlow.BuildOp
equal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
equal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
erfTensorFlow.GenOps.Core
erf'TensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
erfc'TensorFlow.GenOps.Core
evalBuildTTensorFlow.Build
Event 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
EventWriterTensorFlow.Logging
ExcludedCaseTensorFlow.Types
excludeListTensorFlow.OpGen
exitTensorFlow.GenOps.Core
exit'TensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
exp'TensorFlow.GenOps.Core
expandDims 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
expandDims' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
explanationProto.Tensorflow.Core.Framework.OpDef
explicitInputAttrsTensorFlow.OpGen.ParsedOp
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
expm1TensorFlow.GenOps.Core
expm1'TensorFlow.GenOps.Core
exprTensorFlow.Tensor, TensorFlow.Core
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
extractGlimpseTensorFlow.GenOps.Core
extractGlimpse'TensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
extractImagePatches'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - E

editDistanceTensorFlow.GenOps.Core
editDistance'TensorFlow.GenOps.Core
elementProto.Tensorflow.Core.Util.MemmappedFileSystem
eluTensorFlow.GenOps.Core
elu'TensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
eluGrad'TensorFlow.GenOps.Core
embeddingLookupTensorFlow.EmbeddingOps
enableProto.Tensorflow.Core.Protobuf.RewriterConfig
enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
encodeBase64TensorFlow.GenOps.Core
encodeBase64'TensorFlow.GenOps.Core
encodedAudioStringProto.Tensorflow.Core.Framework.Summary
encodedImageStringProto.Tensorflow.Core.Framework.Summary
encodeJpegTensorFlow.GenOps.Core
encodeJpeg'TensorFlow.GenOps.Core
encodeOutputTensorFlow.Build
encodePngTensorFlow.GenOps.Core
encodePng'TensorFlow.GenOps.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
encodeTFRecordsTensorFlow.Records.Conduit
encodeWavTensorFlow.GenOps.Core
encodeWav'TensorFlow.GenOps.Core
endiannessProto.Tensorflow.Core.Protobuf.TensorBundle
enqueueTensorFlow.Queue
enqueueOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
enterTensorFlow.GenOps.Core
enter'TensorFlow.GenOps.Core
entriesProto.Tensorflow.Core.Util.TestLog
entryProto.Tensorflow.Core.Util.TestLog
EntryValue 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
EntryValue'DoubleValueProto.Tensorflow.Core.Util.TestLog
EntryValue'KindProto.Tensorflow.Core.Util.TestLog
EntryValue'StringValueProto.Tensorflow.Core.Util.TestLog
eqLengthGuardTensorFlow.BuildOp
equal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
equal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
erfTensorFlow.GenOps.Core
erf'TensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
erfc'TensorFlow.GenOps.Core
evalBuildTTensorFlow.Build
Event 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
Event'FileVersionProto.Tensorflow.Core.Util.Event
Event'GraphDefProto.Tensorflow.Core.Util.Event
Event'LogMessageProto.Tensorflow.Core.Util.Event
Event'MetaGraphDefProto.Tensorflow.Core.Util.Event
Event'SessionLogProto.Tensorflow.Core.Util.Event
Event'SummaryProto.Tensorflow.Core.Util.Event
Event'TaggedRunMetadataProto.Tensorflow.Core.Util.Event
Event'WhatProto.Tensorflow.Core.Util.Event
EventWriterTensorFlow.Logging
Example 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Example
2 (Type/Class)Proto.Tensorflow.Core.Example.Example
ExampleParserConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
ExampleParserConfiguration'FeatureMapEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
ExcludedCaseTensorFlow.Types
excludeListTensorFlow.OpGen
exitTensorFlow.GenOps.Core
exit'TensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
exp'TensorFlow.GenOps.Core
expandDims 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
expandDims' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
explanationProto.Tensorflow.Core.Framework.OpDef
explicitInputAttrsTensorFlow.OpGen.ParsedOp
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
expm1TensorFlow.GenOps.Core
expm1'TensorFlow.GenOps.Core
exprTensorFlow.Tensor, TensorFlow.Core
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
extentProto.Tensorflow.Core.Framework.TensorSlice
externalValuesProto.Tensorflow.Core.Protobuf.ControlFlow
extractGlimpseTensorFlow.GenOps.Core
extractGlimpse'TensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
extractImagePatches'TensorFlow.GenOps.Core
extrasProto.Tensorflow.Core.Util.TestLog
\ No newline at end of file diff --git a/docs/haddock/doc-index-F.html b/docs/haddock/doc-index-F.html index a461543..0d05503 100644 --- a/docs/haddock/doc-index-F.html +++ b/docs/haddock/doc-index-F.html @@ -1,4 +1,4 @@ - (Index - F)

 

Index - F

fProto.Tensorflow.Core.Framework.AttrValue
factTensorFlow.GenOps.Core
fact'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
fakeQueueTensorFlow.GenOps.Core
fakeQueue'TensorFlow.GenOps.Core
Feed 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
fFTTensorFlow.GenOps.Core
fFT'TensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT2D'TensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fFT3D'TensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fIFOQueue'TensorFlow.GenOps.Core
fIFOQueueV2TensorFlow.GenOps.Core
fIFOQueueV2'TensorFlow.GenOps.Core
fileVersionProto.Tensorflow.Core.Util.Event
fill 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fill' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedLengthRecordReader'TensorFlow.GenOps.Core
fixedLengthRecordReaderV2TensorFlow.GenOps.Core
fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
flagParserTensorFlow.OpGen
floatValProto.Tensorflow.Core.Framework.Tensor
floorTensorFlow.GenOps.Core
floor'TensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorDiv'TensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
floorMod'TensorFlow.GenOps.Core
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPool'TensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalAvgPoolGrad'TensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPool'TensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolGrad'TensorFlow.GenOps.Core
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
funcProto.Tensorflow.Core.Framework.AttrValue
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNorm'TensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedBatchNormGrad'TensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedPadConv2D'TensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - F

fProto.Tensorflow.Core.Framework.AttrValue
factTensorFlow.GenOps.Core
fact'TensorFlow.GenOps.Core
FAILED_PRECONDITIONProto.Tensorflow.Core.Lib.Core.ErrorCodes
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
fakeQueueTensorFlow.GenOps.Core
fakeQueue'TensorFlow.GenOps.Core
Feature 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featureProto.Tensorflow.Core.Example.Feature
Feature'BytesListProto.Tensorflow.Core.Example.Feature
Feature'FloatListProto.Tensorflow.Core.Example.Feature
Feature'Int64ListProto.Tensorflow.Core.Example.Feature
Feature'KindProto.Tensorflow.Core.Example.Feature
FeatureConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
FeatureConfiguration'ConfigProto.Tensorflow.Core.Example.ExampleParserConfiguration
FeatureConfiguration'FixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
FeatureConfiguration'VarLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
FeatureList 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featureListProto.Tensorflow.Core.Example.Feature
FeatureLists 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featureListsProto.Tensorflow.Core.Example.Example
FeatureLists'FeatureListEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featureMapProto.Tensorflow.Core.Example.ExampleParserConfiguration
Features 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
featuresProto.Tensorflow.Core.Example.Example
Features'FeatureEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
Feed 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
fFTTensorFlow.GenOps.Core
fFT'TensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT2D'TensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fFT3D'TensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fIFOQueue'TensorFlow.GenOps.Core
fIFOQueueV2TensorFlow.GenOps.Core
fIFOQueueV2'TensorFlow.GenOps.Core
filenameProto.Tensorflow.Core.Protobuf.MetaGraph
filenameTensorNameProto.Tensorflow.Core.Protobuf.Saver
fileVersionProto.Tensorflow.Core.Util.Event
fill 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fill' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
FixedLenFeatureProto 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
fixedLengthRecordDatasetTensorFlow.GenOps.Core
fixedLengthRecordDataset'TensorFlow.GenOps.Core
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedLengthRecordReader'TensorFlow.GenOps.Core
fixedLengthRecordReaderV2TensorFlow.GenOps.Core
fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
flagParserTensorFlow.OpGen
FloatList 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
floatList 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
floatValProto.Tensorflow.Core.Framework.Tensor
floorTensorFlow.GenOps.Core
floor'TensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorDiv'TensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
floorMod'TensorFlow.GenOps.Core
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
forceGpuCompatibleProto.Tensorflow.Core.Protobuf.Config
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPool'TensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalAvgPoolGrad'TensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPool'TensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolGrad'TensorFlow.GenOps.Core
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
fullNameProto.Tensorflow.Core.Framework.Variable
fullShapeProto.Tensorflow.Core.Framework.Variable
funcProto.Tensorflow.Core.Framework.AttrValue
functionProto.Tensorflow.Core.Framework.Function
FunctionDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
FunctionDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
FunctionDef'RetEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
FunctionDefLibrary 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
functionNameProto.Tensorflow.Core.Framework.Function
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNorm'TensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedBatchNormGrad'TensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedPadConv2D'TensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-G.html b/docs/haddock/doc-index-G.html index 2cecaf0..b8aec9a 100644 --- a/docs/haddock/doc-index-G.html +++ b/docs/haddock/doc-index-G.html @@ -1,4 +1,4 @@ - (Index - G)

 

\ No newline at end of file +

 

Index - G

gatherTensorFlow.GenOps.Core
gather'TensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
gatherNd'TensorFlow.GenOps.Core
gatherV2TensorFlow.GenOps.Core
gatherV2'TensorFlow.GenOps.Core
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getSessionHandleTensorFlow.GenOps.Core
getSessionHandle'TensorFlow.GenOps.Core
getSessionHandleV2TensorFlow.GenOps.Core
getSessionHandleV2'TensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
getSessionTensor'TensorFlow.GenOps.Core
getTFRecordTensorFlow.Records
getTFRecordDataTensorFlow.Records
getTFRecordLengthTensorFlow.Records
getTFRecordsTensorFlow.Records
getVarIntTensorFlow.Internal.VarInt
globalJitLevelProto.Tensorflow.Core.Protobuf.Config
globalNameProto.Tensorflow.Core.Protobuf.Config
globalStepProto.Tensorflow.Core.Protobuf.Debug
GPUInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
GPUOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
gpuOptionsProto.Tensorflow.Core.Protobuf.Config
gradientProto.Tensorflow.Core.Framework.Function
GradientCompatibleTensorFlow.Gradient
GradientDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
2 (Type/Class)Proto.Tensorflow.Core.Framework.Function
gradientDescentTensorFlow.Minimize
gradientFuncProto.Tensorflow.Core.Framework.Function
gradientsTensorFlow.Gradient
GraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
2 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
graphDef 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Util.Event
GraphOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
graphOptionsProto.Tensorflow.Core.Protobuf.Config
GraphStateTensorFlow.Build
greaterTensorFlow.GenOps.Core
greater'TensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
greaterEqual'TensorFlow.GenOps.Core
groupTensorFlow.ControlFlow, TensorFlow.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-H.html b/docs/haddock/doc-index-H.html index 2d76199..c7f695b 100644 --- a/docs/haddock/doc-index-H.html +++ b/docs/haddock/doc-index-H.html @@ -1,4 +1,4 @@ - (Index - H)

 

\ No newline at end of file +

 

\ No newline at end of file diff --git a/docs/haddock/doc-index-I.html b/docs/haddock/doc-index-I.html index 832c83a..338dbc7 100644 --- a/docs/haddock/doc-index-I.html +++ b/docs/haddock/doc-index-I.html @@ -1,4 +1,4 @@ - (Index - I)

 

Index - I

iProto.Tensorflow.Core.Framework.AttrValue
identity 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identity' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identityReaderTensorFlow.GenOps.Core
identityReader'TensorFlow.GenOps.Core
identityReaderV2TensorFlow.GenOps.Core
identityReaderV2'TensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT'TensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT2D'TensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
iFFT3D'TensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igamma'TensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
igammac'TensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imag'TensorFlow.GenOps.Core
imageProto.Tensorflow.Core.Framework.Summary
imageSummaryTensorFlow.GenOps.Core
imageSummary'TensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
immutableConst'TensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
inferredTypeAttrsTensorFlow.OpGen.ParsedOp
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedVariableTensorFlow.Ops
initializedVariable'TensorFlow.Ops
initializeTableTensorFlow.GenOps.Core
initializeTable'TensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
initializeTableFromTextFile'TensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
int64ValProto.Tensorflow.Core.Framework.Tensor
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
inTopK'TensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
inv'TensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invertPermutation'TensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
invGrad'TensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFiniteTensorFlow.GenOps.Core
isFinite'TensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isInf'TensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isNan'TensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
isVariableInitialized'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - I

iProto.Tensorflow.Core.Framework.AttrValue
idProto.Tensorflow.Core.Framework.CostGraph
identity 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identity' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identityReaderTensorFlow.GenOps.Core
identityReader'TensorFlow.GenOps.Core
identityReaderV2TensorFlow.GenOps.Core
identityReaderV2'TensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT'TensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT2D'TensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
iFFT3D'TensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igamma'TensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
igammac'TensorFlow.GenOps.Core
ignoreErrorsDatasetTensorFlow.GenOps.Core
ignoreErrorsDataset'TensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imag'TensorFlow.GenOps.Core
imageProto.Tensorflow.Core.Framework.Summary
imageSummaryTensorFlow.GenOps.Core
imageSummary'TensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
immutableConst'TensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
incarnationProto.Tensorflow.Core.Framework.DeviceAttributes
indexProto.Tensorflow.Core.Framework.LogMemory
indicesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
indicesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
inferredTypeAttrsTensorFlow.OpGen.ParsedOp
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedValueTensorFlow.Variable
initializedVariable 
1 (Function)TensorFlow.Variable
2 (Function)TensorFlow.Ops
initializedVariable' 
1 (Function)TensorFlow.Variable
2 (Function)TensorFlow.Ops
initializerNameProto.Tensorflow.Core.Framework.Variable
initializeTableTensorFlow.GenOps.Core
initializeTable'TensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
initializeTableFromTextFile'TensorFlow.GenOps.Core
initializeTableFromTextFileV2TensorFlow.GenOps.Core
initializeTableFromTextFileV2'TensorFlow.GenOps.Core
initializeTableV2TensorFlow.GenOps.Core
initializeTableV2'TensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
inputInfoProto.Tensorflow.Core.Framework.CostGraph
inputsProto.Tensorflow.Core.Protobuf.MetaGraph
Int64List 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
2 (Type/Class)Proto.Tensorflow.Core.Example.Feature
int64List 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
int64ValProto.Tensorflow.Core.Framework.Tensor
INTERNALProto.Tensorflow.Core.Lib.Core.ErrorCodes
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
inTopK'TensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
inv'TensorFlow.GenOps.Core
INVALID_ARGUMENTProto.Tensorflow.Core.Lib.Core.ErrorCodes
invertTensorFlow.GenOps.Core
invert'TensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invertPermutation'TensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
invGrad'TensorFlow.GenOps.Core
iRFFTTensorFlow.GenOps.Core
iRFFT'TensorFlow.GenOps.Core
iRFFT2DTensorFlow.GenOps.Core
iRFFT2D'TensorFlow.GenOps.Core
iRFFT3DTensorFlow.GenOps.Core
iRFFT3D'TensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFinalProto.Tensorflow.Core.Framework.CostGraph
isFiniteTensorFlow.GenOps.Core
isFinite'TensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isInf'TensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isNan'TensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isResourceProto.Tensorflow.Core.Framework.Variable
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
isVariableInitialized'TensorFlow.GenOps.Core
iteratorTensorFlow.GenOps.Core
iterator'TensorFlow.GenOps.Core
iteratorDisposeTensorFlow.GenOps.Core
iteratorDispose'TensorFlow.GenOps.Core
iteratorFromStringHandleTensorFlow.GenOps.Core
iteratorFromStringHandle'TensorFlow.GenOps.Core
iteratorGetNextTensorFlow.GenOps.Core
iteratorGetNext'TensorFlow.GenOps.Core
iteratorToStringHandleTensorFlow.GenOps.Core
iteratorToStringHandle'TensorFlow.GenOps.Core
itersProto.Tensorflow.Core.Util.TestLog
\ No newline at end of file diff --git a/docs/haddock/doc-index-J.html b/docs/haddock/doc-index-J.html new file mode 100644 index 0000000..0aa3612 --- /dev/null +++ b/docs/haddock/doc-index-J.html @@ -0,0 +1,4 @@ + (Index - J)

 

\ No newline at end of file diff --git a/docs/haddock/doc-index-K.html b/docs/haddock/doc-index-K.html index 95697fc..e7c6c18 100644 --- a/docs/haddock/doc-index-K.html +++ b/docs/haddock/doc-index-K.html @@ -1,4 +1,4 @@ - (Index - K)

 

\ No newline at end of file +

 

\ No newline at end of file diff --git a/docs/haddock/doc-index-L.html b/docs/haddock/doc-index-L.html index b03681b..8ac5f71 100644 --- a/docs/haddock/doc-index-L.html +++ b/docs/haddock/doc-index-L.html @@ -1,4 +1,4 @@ - (Index - L)

 

Index - L

l2LossTensorFlow.GenOps.Core
l2Loss'TensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
lengthFramesProto.Tensorflow.Core.Framework.Summary
lessTensorFlow.GenOps.Core
less'TensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lessEqual'TensorFlow.GenOps.Core
levelProto.Tensorflow.Core.Util.Event
lgammaTensorFlow.GenOps.Core
lgamma'TensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linSpaceTensorFlow.GenOps.Core
linSpace'TensorFlow.GenOps.Core
ListTensorFlow.Types
listProto.Tensorflow.Core.Framework.AttrValue
ListArgTensorFlow.OpGen.ParsedOp
listDiffTensorFlow.GenOps.Core
listDiff'TensorFlow.GenOps.Core
ListOfTensorFlow.Types
logTensorFlow.GenOps.Core
log'TensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
log1p'TensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logEventTensorFlow.Logging
logicalAndTensorFlow.GenOps.Core
logicalAnd'TensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalNot'TensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logicalOr'TensorFlow.GenOps.Core
LogMessage 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
logMessageProto.Tensorflow.Core.Util.Event
LogMessage'DEBUGProto.Tensorflow.Core.Util.Event
LogMessage'ERRORProto.Tensorflow.Core.Util.Event
LogMessage'FATALProto.Tensorflow.Core.Util.Event
LogMessage'INFOProto.Tensorflow.Core.Util.Event
LogMessage'LevelProto.Tensorflow.Core.Util.Event
LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
LogMessage'WARNProto.Tensorflow.Core.Util.Event
logSoftmaxTensorFlow.GenOps.Core
logSoftmax'TensorFlow.GenOps.Core
logSummaryTensorFlow.Logging
logUniformCandidateSamplerTensorFlow.GenOps.Core
logUniformCandidateSampler'TensorFlow.GenOps.Core
lookupNodeTensorFlow.Build
lookupTableExportTensorFlow.GenOps.Core
lookupTableExport'TensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableFind'TensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableImport'TensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableInsert'TensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
lookupTableSize'TensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
loopCond'TensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRN'TensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
lRNGrad'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - L

l2LossTensorFlow.GenOps.Core
l2Loss'TensorFlow.GenOps.Core
labelProto.Tensorflow.Core.Framework.KernelDef
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
lengthProto.Tensorflow.Core.Framework.TensorSlice
lengthFramesProto.Tensorflow.Core.Framework.Summary
lessTensorFlow.GenOps.Core
less'TensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lessEqual'TensorFlow.GenOps.Core
levelProto.Tensorflow.Core.Util.Event
lgammaTensorFlow.GenOps.Core
lgamma'TensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linkageProto.Tensorflow.Core.Util.TestLog
linSpaceTensorFlow.GenOps.Core
linSpace'TensorFlow.GenOps.Core
ListTensorFlow.Types
listProto.Tensorflow.Core.Framework.AttrValue
ListArgTensorFlow.OpGen.ParsedOp
listDiffTensorFlow.GenOps.Core
listDiff'TensorFlow.GenOps.Core
ListOfTensorFlow.Types
liveBytesProto.Tensorflow.Core.Framework.StepStats
lMDBReaderTensorFlow.GenOps.Core
lMDBReader'TensorFlow.GenOps.Core
localityProto.Tensorflow.Core.Framework.DeviceAttributes
logTensorFlow.GenOps.Core
log'TensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
log1p'TensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logEventTensorFlow.Logging
logGraphTensorFlow.Logging
logicalAndTensorFlow.GenOps.Core
logicalAnd'TensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalNot'TensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logicalOr'TensorFlow.GenOps.Core
LogMessage 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
logMessageProto.Tensorflow.Core.Util.Event
LogMessage'DEBUGGINGProto.Tensorflow.Core.Util.Event
LogMessage'ERRORProto.Tensorflow.Core.Util.Event
LogMessage'FATALProto.Tensorflow.Core.Util.Event
LogMessage'INFOProto.Tensorflow.Core.Util.Event
LogMessage'LevelProto.Tensorflow.Core.Util.Event
LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
LogMessage'WARNProto.Tensorflow.Core.Util.Event
logSoftmaxTensorFlow.GenOps.Core
logSoftmax'TensorFlow.GenOps.Core
logSummaryTensorFlow.Logging
logUniformCandidateSamplerTensorFlow.GenOps.Core
logUniformCandidateSampler'TensorFlow.GenOps.Core
lookupNodeTensorFlow.Build
lookupTableExportTensorFlow.GenOps.Core
lookupTableExport'TensorFlow.GenOps.Core
lookupTableExportV2TensorFlow.GenOps.Core
lookupTableExportV2'TensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableFind'TensorFlow.GenOps.Core
lookupTableFindV2TensorFlow.GenOps.Core
lookupTableFindV2'TensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableImport'TensorFlow.GenOps.Core
lookupTableImportV2TensorFlow.GenOps.Core
lookupTableImportV2'TensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableInsert'TensorFlow.GenOps.Core
lookupTableInsertV2TensorFlow.GenOps.Core
lookupTableInsertV2'TensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
lookupTableSize'TensorFlow.GenOps.Core
lookupTableSizeV2TensorFlow.GenOps.Core
lookupTableSizeV2'TensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
loopCond'TensorFlow.GenOps.Core
loopEnterNamesProto.Tensorflow.Core.Protobuf.ControlFlow
loopExitNamesProto.Tensorflow.Core.Protobuf.ControlFlow
lRNTensorFlow.GenOps.Core
lRN'TensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
lRNGrad'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-M.html b/docs/haddock/doc-index-M.html index b05e1fc..653dbc1 100644 --- a/docs/haddock/doc-index-M.html +++ b/docs/haddock/doc-index-M.html @@ -1,4 +1,4 @@ - (Index - M)

 

Index - M

makeQueueTensorFlow.Queue
matchingFilesTensorFlow.GenOps.Core
matchingFiles'TensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matMul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixBandPart'TensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDeterminant'TensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiag'TensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixDiagPart'TensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixInverse'TensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSetDiag'TensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolve'TensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixSolveLs'TensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matrixTriangularSolve'TensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
matTranspose'TensorFlow.Ops
max 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
max'TensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maximum'TensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool'TensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3D'TensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPool3DGrad'TensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGrad'TensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradWithArgmax'TensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmax'TensorFlow.GenOps.Core
maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
maybe'audioProto.Tensorflow.Core.Framework.Summary
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'fileVersionProto.Tensorflow.Core.Util.Event
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphDefProto.Tensorflow.Core.Util.Event
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'histoProto.Tensorflow.Core.Framework.Summary
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'imageProto.Tensorflow.Core.Framework.Summary
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'logMessageProto.Tensorflow.Core.Util.Event
maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'sessionLogProto.Tensorflow.Core.Util.Event
maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'summaryProto.Tensorflow.Core.Util.Event
maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
maybe'tensor 
1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
2 (Function)Proto.Tensorflow.Core.Framework.Summary
maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
maybe'versionsProto.Tensorflow.Core.Framework.Graph
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
mean 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mean' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mergeTensorFlow.GenOps.Core
merge'TensorFlow.GenOps.Core
mergeAllSummariesTensorFlow.Logging
mergeSummaryTensorFlow.GenOps.Core
mergeSummary'TensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
mergeV2Checkpoints'TensorFlow.GenOps.Core
messageProto.Tensorflow.Core.Util.Event
metaGraphDefProto.Tensorflow.Core.Util.Event
min 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
min'TensorFlow.GenOps.Core
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
minimum'TensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPad'TensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
mirrorPadGrad'TensorFlow.GenOps.Core
MixedListArgTensorFlow.OpGen.ParsedOp
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mod'TensorFlow.GenOps.Core
MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
msgProto.Tensorflow.Core.Util.Event
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
multinomial'TensorFlow.GenOps.Core
mutableDenseHashTableTensorFlow.GenOps.Core
mutableDenseHashTable'TensorFlow.GenOps.Core
mutableHashTableTensorFlow.GenOps.Core
mutableHashTable'TensorFlow.GenOps.Core
mutableHashTableOfTensorsTensorFlow.GenOps.Core
mutableHashTableOfTensors'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - M

machineProto.Tensorflow.Core.Util.TestLog
MachineConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
machineConfigurationProto.Tensorflow.Core.Util.TestLog
makeIteratorTensorFlow.GenOps.Core
makeIterator'TensorFlow.GenOps.Core
makeQueueTensorFlow.Queue
mapClearTensorFlow.GenOps.Core
mapClear'TensorFlow.GenOps.Core
mapIncompleteSizeTensorFlow.GenOps.Core
mapIncompleteSize'TensorFlow.GenOps.Core
mapPeekTensorFlow.GenOps.Core
mapPeek'TensorFlow.GenOps.Core
mapSizeTensorFlow.GenOps.Core
mapSize'TensorFlow.GenOps.Core
mapStageTensorFlow.GenOps.Core
mapStage'TensorFlow.GenOps.Core
mapUnstageTensorFlow.GenOps.Core
mapUnstage'TensorFlow.GenOps.Core
mapUnstageNoKeyTensorFlow.GenOps.Core
mapUnstageNoKey'TensorFlow.GenOps.Core
matchingFilesTensorFlow.GenOps.Core
matchingFiles'TensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matMul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixBandPart'TensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDeterminant'TensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiag'TensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixDiagPart'TensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixInverse'TensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSetDiag'TensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolve'TensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixSolveLs'TensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matrixTriangularSolve'TensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
matTranspose'TensorFlow.Ops
max 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
max'TensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maximum'TensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool'TensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3D'TensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPool3DGrad'TensorFlow.GenOps.Core
maxPool3DGradGradTensorFlow.GenOps.Core
maxPool3DGradGrad'TensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGrad'TensorFlow.GenOps.Core
maxPoolGradGradTensorFlow.GenOps.Core
maxPoolGradGrad'TensorFlow.GenOps.Core
maxPoolGradGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradGradWithArgmax'TensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradWithArgmax'TensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmax'TensorFlow.GenOps.Core
maxToKeepProto.Tensorflow.Core.Protobuf.Saver
maybe'allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
maybe'allowedValues 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.KernelDef
maybe'anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'anyListProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'audioProto.Tensorflow.Core.Framework.Summary
maybe'autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'buildConfigurationProto.Tensorflow.Core.Util.TestLog
maybe'bytesList 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
maybe'changelistProto.Tensorflow.Core.Util.TestLog
maybe'clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
maybe'clusterDefProto.Tensorflow.Core.Protobuf.Config
maybe'commitIdProto.Tensorflow.Core.Util.TestLog
maybe'configProto.Tensorflow.Core.Example.ExampleParserConfiguration
maybe'contextProto.Tensorflow.Core.Example.Example
maybe'cooSparseProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'cpuInfoProto.Tensorflow.Core.Util.TestLog
maybe'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
maybe'defaultValue 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'doubleValueProto.Tensorflow.Core.Util.TestLog
maybe'encodingProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'entriesProto.Tensorflow.Core.Util.TestLog
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'featureListsProto.Tensorflow.Core.Example.Example
maybe'featuresProto.Tensorflow.Core.Example.Example
maybe'fileVersionProto.Tensorflow.Core.Util.Event
maybe'fixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
maybe'floatList 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphDef 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Util.Event
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'hashProto.Tensorflow.Core.Util.TestLog
maybe'hasLengthProto.Tensorflow.Core.Framework.TensorSlice
maybe'histoProto.Tensorflow.Core.Framework.Summary
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'imageProto.Tensorflow.Core.Framework.Summary
maybe'int64List 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
maybe'kind 
1 (Function)Proto.Tensorflow.Core.Example.Feature
2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
3 (Function)Proto.Tensorflow.Core.Util.TestLog
maybe'lengthProto.Tensorflow.Core.Framework.TensorSlice
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'localityProto.Tensorflow.Core.Framework.DeviceAttributes
maybe'logMessageProto.Tensorflow.Core.Util.Event
maybe'machineConfigurationProto.Tensorflow.Core.Util.TestLog
maybe'memoryInfoProto.Tensorflow.Core.Util.TestLog
maybe'memoryStatsProto.Tensorflow.Core.Framework.StepStats
maybe'metaProto.Tensorflow.Core.Util.SavedTensorSlice
maybe'metadataProto.Tensorflow.Core.Framework.Summary
maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
maybe'metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'nameProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'nodeListProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'platformInfoProto.Tensorflow.Core.Util.TestLog
maybe'pluginDataProto.Tensorflow.Core.Framework.Summary
maybe'rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'runConfigurationProto.Tensorflow.Core.Util.TestLog
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
maybe'sessionLogProto.Tensorflow.Core.Util.Event
maybe'shape 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.CostGraph
5 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
6 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
maybe'signatureProto.Tensorflow.Core.Framework.Function
maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
maybe'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'stringValueProto.Tensorflow.Core.Util.TestLog
maybe'strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'summaryProto.Tensorflow.Core.Util.Event
maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
maybe'tensor 
1 (Function)Proto.Tensorflow.Core.Framework.LogMemory
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
3 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
4 (Function)Proto.Tensorflow.Core.Framework.Summary
maybe'tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
maybe'tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
maybe'tensorShape 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Function)Proto.Tensorflow.Core.Example.Feature
3 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
4 (Function)Proto.Tensorflow.Core.Framework.Function
5 (Function)Proto.Tensorflow.Core.Framework.NodeDef
6 (Function)Proto.Tensorflow.Core.Framework.AttrValue
7 (Function)Proto.Tensorflow.Core.Framework.Summary
8 (Function)Proto.Tensorflow.Core.Util.TestLog
maybe'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
maybe'varLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
maybe'versionProto.Tensorflow.Core.Protobuf.TensorBundle
maybe'versions 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
maybe'whatProto.Tensorflow.Core.Util.Event
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
mean 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mean' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
MemmappedFileSystemDirectory 
1 (Data Constructor)Proto.Tensorflow.Core.Util.MemmappedFileSystem
2 (Type/Class)Proto.Tensorflow.Core.Util.MemmappedFileSystem
MemmappedFileSystemDirectoryElement 
1 (Data Constructor)Proto.Tensorflow.Core.Util.MemmappedFileSystem
2 (Type/Class)Proto.Tensorflow.Core.Util.MemmappedFileSystem
memoryProto.Tensorflow.Core.Framework.StepStats
MemoryInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
memoryInfoProto.Tensorflow.Core.Util.TestLog
memoryLimit 
1 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
2 (Function)Proto.Tensorflow.Core.Util.TestLog
MemoryLogRawAllocation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogRawDeallocation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogStep 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogTensorAllocation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogTensorDeallocation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
MemoryLogTensorOutput 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
2 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
memoryOptimizationProto.Tensorflow.Core.Protobuf.RewriterConfig
MemoryStats 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
memoryStatsProto.Tensorflow.Core.Framework.StepStats
memoryTimeProto.Tensorflow.Core.Framework.CostGraph
mergeTensorFlow.GenOps.Core
merge'TensorFlow.GenOps.Core
mergeAllSummariesTensorFlow.Logging
mergeSummaryTensorFlow.GenOps.Core
mergeSummary'TensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
mergeV2Checkpoints'TensorFlow.GenOps.Core
messageProto.Tensorflow.Core.Util.Event
metaProto.Tensorflow.Core.Util.SavedTensorSlice
metadataProto.Tensorflow.Core.Framework.Summary
MetaGraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
metaGraphDefProto.Tensorflow.Core.Util.Event
MetaGraphDef'CollectionDefEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
MetaGraphDef'MetaInfoDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
MetaGraphDef'SignatureDefEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
metaGraphsProto.Tensorflow.Core.Protobuf.SavedModel
metaGraphVersionProto.Tensorflow.Core.Protobuf.MetaGraph
metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
methodNameProto.Tensorflow.Core.Protobuf.MetaGraph
mfccTensorFlow.GenOps.Core
mfcc'TensorFlow.GenOps.Core
mhzPerCpuProto.Tensorflow.Core.Util.TestLog
min 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
min'TensorFlow.GenOps.Core
minConsumerProto.Tensorflow.Core.Framework.Versions
MinimizerTensorFlow.Minimize
minimizeWithTensorFlow.Minimize
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
minimum'TensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPad'TensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
mirrorPadGrad'TensorFlow.GenOps.Core
MixedListArgTensorFlow.OpGen.ParsedOp
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mod'TensorFlow.GenOps.Core
modeProto.Tensorflow.Core.Util.TestLog
modelProto.Tensorflow.Core.Util.TestLog
MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
msgProto.Tensorflow.Core.Util.Event
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
multinomial'TensorFlow.GenOps.Core
mutableDenseHashTableTensorFlow.GenOps.Core
mutableDenseHashTable'TensorFlow.GenOps.Core
mutableDenseHashTableV2TensorFlow.GenOps.Core
mutableDenseHashTableV2'TensorFlow.GenOps.Core
mutableHashTableTensorFlow.GenOps.Core
mutableHashTable'TensorFlow.GenOps.Core
mutableHashTableOfTensorsTensorFlow.GenOps.Core
mutableHashTableOfTensors'TensorFlow.GenOps.Core
mutableHashTableOfTensorsV2TensorFlow.GenOps.Core
mutableHashTableOfTensorsV2'TensorFlow.GenOps.Core
mutableHashTableV2TensorFlow.GenOps.Core
mutableHashTableV2'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-N.html b/docs/haddock/doc-index-N.html index 886aa59..ed1d5f4 100644 --- a/docs/haddock/doc-index-N.html +++ b/docs/haddock/doc-index-N.html @@ -1,4 +1,4 @@ - (Index - N)

 

Index - N

Name 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
name 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
neg' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
negTrain'TensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nextIteration'TensorFlow.GenOps.Core
NilTensorFlow.Types
nodeProto.Tensorflow.Core.Framework.Graph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeNameProto.Tensorflow.Core.Framework.Summary
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
nonMaxSuppression'TensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
noOp'TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
notEqual'TensorFlow.GenOps.Core
numProto.Tensorflow.Core.Framework.Summary
numberAttrProto.Tensorflow.Core.Framework.OpDef
numChannelsProto.Tensorflow.Core.Framework.Summary
numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file +

 

Index - N

Name 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
name 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.KernelDef
5 (Function)Proto.Tensorflow.Core.Framework.AttrValue
6 (Function)Proto.Tensorflow.Core.Framework.CostGraph
7 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
8 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
9 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
10 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
11 (Function)Proto.Tensorflow.Core.Framework.TensorShape
12 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
13 (Function)Proto.Tensorflow.Core.Util.MemmappedFileSystem
14 (Function)Proto.Tensorflow.Core.Util.TestLog
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NamedTensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.NamedTensor
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.NamedTensor
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
neg' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
negTrain'TensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nextIteration'TensorFlow.GenOps.Core
NilTensorFlow.Types
node 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
nodeDefProto.Tensorflow.Core.Framework.Function
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeExecStats 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
nodeListProto.Tensorflow.Core.Protobuf.MetaGraph
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeName 
1 (Function)Proto.Tensorflow.Core.Framework.StepStats
2 (Function)Proto.Tensorflow.Core.Framework.Summary
3 (Function)Proto.Tensorflow.Core.Protobuf.Debug
NodeOutput 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
NodesTensorFlow.Nodes, TensorFlow.Core
nodeStatsProto.Tensorflow.Core.Framework.StepStats
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
nonMaxSuppression'TensorFlow.GenOps.Core
nonMaxSuppressionV2TensorFlow.GenOps.Core
nonMaxSuppressionV2'TensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
noOp'TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
notEqual'TensorFlow.GenOps.Core
NOT_FOUNDProto.Tensorflow.Core.Lib.Core.ErrorCodes
numProto.Tensorflow.Core.Framework.Summary
numberAttrProto.Tensorflow.Core.Framework.OpDef
numBytesProto.Tensorflow.Core.Framework.LogMemory
numChannelsProto.Tensorflow.Core.Framework.Summary
numCoresProto.Tensorflow.Core.Util.TestLog
numCoresAllowedProto.Tensorflow.Core.Util.TestLog
numReplicasProto.Tensorflow.Core.Protobuf.RewriterConfig
numShardsProto.Tensorflow.Core.Protobuf.TensorBundle
numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file diff --git a/docs/haddock/doc-index-O.html b/docs/haddock/doc-index-O.html index 0aba0f2..6bc057f 100644 --- a/docs/haddock/doc-index-O.html +++ b/docs/haddock/doc-index-O.html @@ -1,4 +1,4 @@ - (Index - O)

 

Index - O

obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
oneHot 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
oneHot' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
OneOfTensorFlow.Types, TensorFlow.Core
OneOfsTensorFlow.Types
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
OpParamsTensorFlow.BuildOp
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
OptionsTensorFlow.Session, TensorFlow.Core
optLevelProto.Tensorflow.Core.Protobuf.Config
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputTensorFlow.Output
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputNodeNameTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file +

 

Index - O

obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
offset 
1 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
2 (Function)Proto.Tensorflow.Core.Util.MemmappedFileSystem
OKProto.Tensorflow.Core.Lib.Core.ErrorCodes
oneHot 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
oneHot' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
OneOfTensorFlow.Types, TensorFlow.Core
OneOfsTensorFlow.Types
onesLikeTensorFlow.GenOps.Core
onesLike'TensorFlow.GenOps.Core
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.KernelDef
opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
operationProto.Tensorflow.Core.Framework.LogMemory
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
OpParamsTensorFlow.BuildOp
opStartRelMicrosProto.Tensorflow.Core.Framework.StepStats
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
optimizersProto.Tensorflow.Core.Protobuf.RewriterConfig
optimizeTensorLayoutProto.Tensorflow.Core.Protobuf.RewriterConfig
OptionsTensorFlow.Session, TensorFlow.Core
optLevelProto.Tensorflow.Core.Protobuf.Config
optsProto.Tensorflow.Core.Util.TestLog
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
orderedMapClearTensorFlow.GenOps.Core
orderedMapClear'TensorFlow.GenOps.Core
orderedMapIncompleteSizeTensorFlow.GenOps.Core
orderedMapIncompleteSize'TensorFlow.GenOps.Core
orderedMapPeekTensorFlow.GenOps.Core
orderedMapPeek'TensorFlow.GenOps.Core
orderedMapSizeTensorFlow.GenOps.Core
orderedMapSize'TensorFlow.GenOps.Core
orderedMapStageTensorFlow.GenOps.Core
orderedMapStage'TensorFlow.GenOps.Core
orderedMapUnstageTensorFlow.GenOps.Core
orderedMapUnstage'TensorFlow.GenOps.Core
orderedMapUnstageNoKeyTensorFlow.GenOps.Core
orderedMapUnstageNoKey'TensorFlow.GenOps.Core
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
output 
1 (Function)TensorFlow.Output
2 (Function)Proto.Tensorflow.Core.Framework.StepStats
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
outputInfoProto.Tensorflow.Core.Framework.CostGraph
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputNodeNameTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
outputsProto.Tensorflow.Core.Protobuf.MetaGraph
outputSlotProto.Tensorflow.Core.Protobuf.Debug
OUT_OF_RANGEProto.Tensorflow.Core.Lib.Core.ErrorCodes
\ No newline at end of file diff --git a/docs/haddock/doc-index-P.html b/docs/haddock/doc-index-P.html index ff91dfa..c3e819c 100644 --- a/docs/haddock/doc-index-P.html +++ b/docs/haddock/doc-index-P.html @@ -1,4 +1,4 @@ - (Index - P)

 

Index - P

pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
pack' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
pad'TensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
paddingFIFOQueue'TensorFlow.GenOps.Core
paddingFIFOQueueV2TensorFlow.GenOps.Core
paddingFIFOQueueV2'TensorFlow.GenOps.Core
parallelConcatTensorFlow.GenOps.Core
parallelConcat'TensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parameterizedTruncatedNormal'TensorFlow.GenOps.Core
ParsedArg 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
ParsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgDescriptionTensorFlow.OpGen.ParsedOp
parsedArgNameTensorFlow.OpGen.ParsedOp
parsedInputsTensorFlow.OpGen.ParsedOp
ParsedOp 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
parsedOpDescriptionTensorFlow.OpGen.ParsedOp
parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
parsedOpNameTensorFlow.OpGen.ParsedOp
parsedOpSummaryTensorFlow.OpGen.ParsedOp
parsedOutputsTensorFlow.OpGen.ParsedOp
parseExampleTensorFlow.GenOps.Core
parseExample'TensorFlow.GenOps.Core
parseOpTensorFlow.OpGen.ParsedOp
parseSingleSequenceExampleTensorFlow.GenOps.Core
parseSingleSequenceExample'TensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
parseTensor'TensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
PendingNodeNameTensorFlow.Output
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholder' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
placeholderV2TensorFlow.GenOps.Core
placeholderV2'TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placeholderWithDefault'TensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
polygamma'TensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
pow'TensorFlow.GenOps.Core
prefixTensorFlow.OpGen
preventGradientTensorFlow.GenOps.Core
preventGradient'TensorFlow.GenOps.Core
printTensorFlow.GenOps.Core
print'TensorFlow.GenOps.Core
priorityQueueTensorFlow.GenOps.Core
priorityQueue'TensorFlow.GenOps.Core
priorityQueueV2TensorFlow.GenOps.Core
priorityQueueV2'TensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
prod'TensorFlow.GenOps.Core
protoShapeTensorFlow.Types
pureOpTensorFlow.BuildOp
PureResultTensorFlow.BuildOp
pureResultTensorFlow.BuildOp
putTFRecordTensorFlow.Records
putTFRecordDataTensorFlow.Records
putTFRecordLengthTensorFlow.Records
putVarIntTensorFlow.Internal.VarInt
\ No newline at end of file +

 

Index - P

pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
pack' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
pad'TensorFlow.GenOps.Core
paddedBatchDatasetTensorFlow.GenOps.Core
paddedBatchDataset'TensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
paddingFIFOQueue'TensorFlow.GenOps.Core
paddingFIFOQueueV2TensorFlow.GenOps.Core
paddingFIFOQueueV2'TensorFlow.GenOps.Core
padV2TensorFlow.GenOps.Core
padV2'TensorFlow.GenOps.Core
parallelConcatTensorFlow.GenOps.Core
parallelConcat'TensorFlow.GenOps.Core
parallelIterationsProto.Tensorflow.Core.Protobuf.ControlFlow
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parameterizedTruncatedNormal'TensorFlow.GenOps.Core
ParsedArg 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
ParsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgDescriptionTensorFlow.OpGen.ParsedOp
parsedArgNameTensorFlow.OpGen.ParsedOp
parsedInputsTensorFlow.OpGen.ParsedOp
ParsedOp 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
parsedOpDescriptionTensorFlow.OpGen.ParsedOp
parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
parsedOpNameTensorFlow.OpGen.ParsedOp
parsedOpSummaryTensorFlow.OpGen.ParsedOp
parsedOutputsTensorFlow.OpGen.ParsedOp
parseExampleTensorFlow.GenOps.Core
parseExample'TensorFlow.GenOps.Core
parseOpTensorFlow.OpGen.ParsedOp
parseSingleSequenceExampleTensorFlow.GenOps.Core
parseSingleSequenceExample'TensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
parseTensor'TensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
peakBytesProto.Tensorflow.Core.Framework.StepStats
PendingNodeNameTensorFlow.Output
PERMISSION_DENIEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
physicalDescriptionProto.Tensorflow.Core.Util.TestLog
physicalDeviceDescProto.Tensorflow.Core.Framework.DeviceAttributes
pivotForBodyNameProto.Tensorflow.Core.Protobuf.ControlFlow
pivotForPredNameProto.Tensorflow.Core.Protobuf.ControlFlow
pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholder' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
placeholderV2TensorFlow.GenOps.Core
placeholderV2'TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placeholderWithDefault'TensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
PlatformInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
platformInfoProto.Tensorflow.Core.Util.TestLog
pluginDataProto.Tensorflow.Core.Framework.Summary
pluginNameProto.Tensorflow.Core.Framework.Summary
pollingActiveDelayUsecsProto.Tensorflow.Core.Protobuf.Config
pollingInactiveDelayMsecsProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
polygamma'TensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
pow'TensorFlow.GenOps.Core
precedingNodeProto.Tensorflow.Core.Framework.CostGraph
precedingPortProto.Tensorflow.Core.Framework.CostGraph
predNameProto.Tensorflow.Core.Protobuf.ControlFlow
prefixTensorFlow.OpGen
preventGradientTensorFlow.GenOps.Core
preventGradient'TensorFlow.GenOps.Core
printTensorFlow.GenOps.Core
print'TensorFlow.GenOps.Core
priorityQueueTensorFlow.GenOps.Core
priorityQueue'TensorFlow.GenOps.Core
priorityQueueV2TensorFlow.GenOps.Core
priorityQueueV2'TensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
prod'TensorFlow.GenOps.Core
producerProto.Tensorflow.Core.Framework.Versions
protocolProto.Tensorflow.Core.Protobuf.TensorflowServer
protoShapeTensorFlow.Types
ptr 
1 (Function)Proto.Tensorflow.Core.Framework.LogMemory
2 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
pureOpTensorFlow.BuildOp
PureResultTensorFlow.BuildOp
pureResultTensorFlow.BuildOp
putTFRecordTensorFlow.Records
putTFRecordDataTensorFlow.Records
putTFRecordLengthTensorFlow.Records
putVarIntTensorFlow.Internal.VarInt
\ No newline at end of file diff --git a/docs/haddock/doc-index-Q.html b/docs/haddock/doc-index-Q.html index 08edba9..b7f0b88 100644 --- a/docs/haddock/doc-index-Q.html +++ b/docs/haddock/doc-index-Q.html @@ -1,4 +1,4 @@ - (Index - Q)

 

Index - Q

qrTensorFlow.GenOps.Core
qr'TensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizeAndDequantize'TensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedAvgPool'TensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedBiasAdd'TensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConcat'TensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedConv2D'TensorFlow.GenOps.Core
quantizedInstanceNormTensorFlow.GenOps.Core
quantizedInstanceNorm'TensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMatMul'TensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizedMaxPool'TensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu'TensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedRelu6'TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReluX'TensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizedReshape'TensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
quantizeV2'TensorFlow.GenOps.Core
QueueTensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueClose'TensorFlow.GenOps.Core
queueCloseV2TensorFlow.GenOps.Core
queueCloseV2'TensorFlow.GenOps.Core
queueDequeueTensorFlow.GenOps.Core
queueDequeue'TensorFlow.GenOps.Core
queueDequeueManyTensorFlow.GenOps.Core
queueDequeueMany'TensorFlow.GenOps.Core
queueDequeueManyV2TensorFlow.GenOps.Core
queueDequeueManyV2'TensorFlow.GenOps.Core
queueDequeueUpToTensorFlow.GenOps.Core
queueDequeueUpTo'TensorFlow.GenOps.Core
queueDequeueUpToV2TensorFlow.GenOps.Core
queueDequeueUpToV2'TensorFlow.GenOps.Core
queueDequeueV2TensorFlow.GenOps.Core
queueDequeueV2'TensorFlow.GenOps.Core
queueEnqueueTensorFlow.GenOps.Core
queueEnqueue'TensorFlow.GenOps.Core
queueEnqueueManyTensorFlow.GenOps.Core
queueEnqueueMany'TensorFlow.GenOps.Core
queueEnqueueManyV2TensorFlow.GenOps.Core
queueEnqueueManyV2'TensorFlow.GenOps.Core
queueEnqueueV2TensorFlow.GenOps.Core
queueEnqueueV2'TensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
queueSize'TensorFlow.GenOps.Core
queueSizeV2TensorFlow.GenOps.Core
queueSizeV2'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - Q

qrTensorFlow.GenOps.Core
qr'TensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizeAndDequantize'TensorFlow.GenOps.Core
quantizeAndDequantizeV2TensorFlow.GenOps.Core
quantizeAndDequantizeV2'TensorFlow.GenOps.Core
quantizeAndDequantizeV3TensorFlow.GenOps.Core
quantizeAndDequantizeV3'TensorFlow.GenOps.Core
quantizedAddTensorFlow.GenOps.Core
quantizedAdd'TensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedAvgPool'TensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedBiasAdd'TensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConcat'TensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedConv2D'TensorFlow.GenOps.Core
quantizedInstanceNormTensorFlow.GenOps.Core
quantizedInstanceNorm'TensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMatMul'TensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizedMaxPool'TensorFlow.GenOps.Core
quantizedMulTensorFlow.GenOps.Core
quantizedMul'TensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu'TensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedRelu6'TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReluX'TensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizedReshape'TensorFlow.GenOps.Core
quantizedResizeBilinearTensorFlow.GenOps.Core
quantizedResizeBilinear'TensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
quantizeV2'TensorFlow.GenOps.Core
QueueTensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueClose'TensorFlow.GenOps.Core
queueClosedExceptionTypesProto.Tensorflow.Core.Protobuf.QueueRunner
queueCloseV2TensorFlow.GenOps.Core
queueCloseV2'TensorFlow.GenOps.Core
queueDequeueTensorFlow.GenOps.Core
queueDequeue'TensorFlow.GenOps.Core
queueDequeueManyTensorFlow.GenOps.Core
queueDequeueMany'TensorFlow.GenOps.Core
queueDequeueManyV2TensorFlow.GenOps.Core
queueDequeueManyV2'TensorFlow.GenOps.Core
queueDequeueUpToTensorFlow.GenOps.Core
queueDequeueUpTo'TensorFlow.GenOps.Core
queueDequeueUpToV2TensorFlow.GenOps.Core
queueDequeueUpToV2'TensorFlow.GenOps.Core
queueDequeueV2TensorFlow.GenOps.Core
queueDequeueV2'TensorFlow.GenOps.Core
queueEnqueueTensorFlow.GenOps.Core
queueEnqueue'TensorFlow.GenOps.Core
queueEnqueueManyTensorFlow.GenOps.Core
queueEnqueueMany'TensorFlow.GenOps.Core
queueEnqueueManyV2TensorFlow.GenOps.Core
queueEnqueueManyV2'TensorFlow.GenOps.Core
queueEnqueueV2TensorFlow.GenOps.Core
queueEnqueueV2'TensorFlow.GenOps.Core
queueIsClosedTensorFlow.GenOps.Core
queueIsClosed'TensorFlow.GenOps.Core
queueIsClosedV2TensorFlow.GenOps.Core
queueIsClosedV2'TensorFlow.GenOps.Core
queueNameProto.Tensorflow.Core.Protobuf.QueueRunner
QueueRunnerDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.QueueRunner
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.QueueRunner
queueSizeTensorFlow.GenOps.Core
queueSize'TensorFlow.GenOps.Core
queueSizeV2TensorFlow.GenOps.Core
queueSizeV2'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-R.html b/docs/haddock/doc-index-R.html index c817ecb..4589ad2 100644 --- a/docs/haddock/doc-index-R.html +++ b/docs/haddock/doc-index-R.html @@ -1,4 +1,4 @@ - (Index - R)

 

Index - R

randomCropTensorFlow.GenOps.Core
randomCrop'TensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomGamma'TensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffle'TensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomShuffleQueue'TensorFlow.GenOps.Core
randomShuffleQueueV2TensorFlow.GenOps.Core
randomShuffleQueueV2'TensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomStandardNormal'TensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniform'TensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
randomUniformInt'TensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
range' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rankTensorFlow.GenOps.Core
rank'TensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumRecordsProduced'TensorFlow.GenOps.Core
readerNumRecordsProducedV2TensorFlow.GenOps.Core
readerNumRecordsProducedV2'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerRead'TensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerReadUpTo'TensorFlow.GenOps.Core
readerReadUpToV2TensorFlow.GenOps.Core
readerReadUpToV2'TensorFlow.GenOps.Core
readerReadV2TensorFlow.GenOps.Core
readerReadV2'TensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerReset'TensorFlow.GenOps.Core
readerResetV2TensorFlow.GenOps.Core
readerResetV2'TensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerRestoreState'TensorFlow.GenOps.Core
readerRestoreStateV2TensorFlow.GenOps.Core
readerRestoreStateV2'TensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readerSerializeState'TensorFlow.GenOps.Core
readerSerializeStateV2TensorFlow.GenOps.Core
readerSerializeStateV2'TensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readFile'TensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
readVariableOpTensorFlow.GenOps.Core
readVariableOp'TensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
real'TensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
realDiv'TensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocal'TensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reciprocalGrad'TensorFlow.GenOps.Core
recordInputTensorFlow.GenOps.Core
recordInput'TensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
reduceJoin'TensorFlow.GenOps.Core
Ref 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
refEnterTensorFlow.GenOps.Core
refEnter'TensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refExit'TensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refIdentity'TensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refMerge'TensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refNextIteration'TensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSelect'TensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
refSwitch'TensorFlow.GenOps.Core
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6'TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
relu6Grad'TensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reluGrad' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
renderTensorFlow.Tensor, TensorFlow.Core
RenderedTensorFlow.Tensor
renderedTensorFlow.Tensor
renderedNodeDefsTensorFlow.Build
renderedOutputTensorFlow.Tensor
renderValueTensorFlow.Tensor
requantizationRangeTensorFlow.GenOps.Core
requantizationRange'TensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
requantize'TensorFlow.GenOps.Core
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reshape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeArea'TensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBicubic'TensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinear'TensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeBilinearGrad'TensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighbor'TensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resizeNearestNeighborGrad'TensorFlow.GenOps.Core
resourceApplyAdadeltaTensorFlow.GenOps.Core
resourceApplyAdadelta'TensorFlow.GenOps.Core
resourceApplyAdagradTensorFlow.GenOps.Core
resourceApplyAdagrad'TensorFlow.GenOps.Core
resourceApplyAdagradDATensorFlow.GenOps.Core
resourceApplyAdagradDA'TensorFlow.GenOps.Core
resourceApplyAdamTensorFlow.GenOps.Core
resourceApplyAdam'TensorFlow.GenOps.Core
resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceApplyFtrlTensorFlow.GenOps.Core
resourceApplyFtrl'TensorFlow.GenOps.Core
resourceApplyGradientDescentTensorFlow.GenOps.Core
resourceApplyGradientDescent'TensorFlow.GenOps.Core
resourceApplyMomentumTensorFlow.GenOps.Core
resourceApplyMomentum'TensorFlow.GenOps.Core
resourceApplyProximalAdagradTensorFlow.GenOps.Core
resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceApplyRMSPropTensorFlow.GenOps.Core
resourceApplyRMSProp'TensorFlow.GenOps.Core
ResourceArgTensorFlow.OpGen.ParsedOp
resourceGatherTensorFlow.GenOps.Core
resourceGather'TensorFlow.GenOps.Core
ResourceHandle 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
4 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
resourceScatterAddTensorFlow.GenOps.Core
resourceScatterAdd'TensorFlow.GenOps.Core
resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
resourceSparseApplyAdagradTensorFlow.GenOps.Core
resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyAdagradDATensorFlow.GenOps.Core
resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceSparseApplyFtrlTensorFlow.GenOps.Core
resourceSparseApplyFtrl'TensorFlow.GenOps.Core
resourceSparseApplyMomentumTensorFlow.GenOps.Core
resourceSparseApplyMomentum'TensorFlow.GenOps.Core
resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceSparseApplyRMSPropTensorFlow.GenOps.Core
resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restore'TensorFlow.GenOps.Core
restoreFromNameTensorFlow.Ops
restoreSliceTensorFlow.GenOps.Core
restoreSlice'TensorFlow.GenOps.Core
restoreV2TensorFlow.GenOps.Core
restoreV2'TensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverse'TensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseSequence'TensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
reverseV2'TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rGBToHSV'TensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
rint'TensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
round'TensorFlow.GenOps.Core
RPCOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
rpcOptionsProto.Tensorflow.Core.Protobuf.Config
rsqrtTensorFlow.GenOps.Core
rsqrt'TensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
rsqrtGrad'TensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session, TensorFlow.Core
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
runMetadataProto.Tensorflow.Core.Util.Event
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runRefTensorFlow.Tensor
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runValueTensorFlow.Tensor
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
\ No newline at end of file +

 

Index - R

randomCropTensorFlow.GenOps.Core
randomCrop'TensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomGamma'TensorFlow.GenOps.Core
randomPoissonTensorFlow.GenOps.Core
randomPoisson'TensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffle'TensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomShuffleQueue'TensorFlow.GenOps.Core
randomShuffleQueueV2TensorFlow.GenOps.Core
randomShuffleQueueV2'TensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomStandardNormal'TensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniform'TensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
randomUniformInt'TensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
range' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rangeDatasetTensorFlow.GenOps.Core
rangeDataset'TensorFlow.GenOps.Core
rankTensorFlow.GenOps.Core
rank'TensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumRecordsProduced'TensorFlow.GenOps.Core
readerNumRecordsProducedV2TensorFlow.GenOps.Core
readerNumRecordsProducedV2'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerRead'TensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerReadUpTo'TensorFlow.GenOps.Core
readerReadUpToV2TensorFlow.GenOps.Core
readerReadUpToV2'TensorFlow.GenOps.Core
readerReadV2TensorFlow.GenOps.Core
readerReadV2'TensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerReset'TensorFlow.GenOps.Core
readerResetV2TensorFlow.GenOps.Core
readerResetV2'TensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerRestoreState'TensorFlow.GenOps.Core
readerRestoreStateV2TensorFlow.GenOps.Core
readerRestoreStateV2'TensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readerSerializeState'TensorFlow.GenOps.Core
readerSerializeStateV2TensorFlow.GenOps.Core
readerSerializeStateV2'TensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readFile'TensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
readValueTensorFlow.Variable
readVariableOpTensorFlow.GenOps.Core
readVariableOp'TensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
real'TensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
realDiv'TensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocal'TensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reciprocalGrad'TensorFlow.GenOps.Core
recordInputTensorFlow.GenOps.Core
recordInput'TensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
reduceJoin'TensorFlow.GenOps.Core
reduceMeanTensorFlow.Ops
reduceMean'TensorFlow.Ops
reduceSumTensorFlow.Ops
reduceSum'TensorFlow.Ops
Ref 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
refEnterTensorFlow.GenOps.Core
refEnter'TensorFlow.GenOps.Core
referencedTensorProto.Tensorflow.Core.Framework.StepStats
refExitTensorFlow.GenOps.Core
refExit'TensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refIdentity'TensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refMerge'TensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refNextIteration'TensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSelect'TensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
refSwitch'TensorFlow.GenOps.Core
releaseProto.Tensorflow.Core.Util.TestLog
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6'TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
relu6Grad'TensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reluGrad' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
remoteFusedGraphExecuteTensorFlow.GenOps.Core
remoteFusedGraphExecute'TensorFlow.GenOps.Core
renderTensorFlow.Tensor, TensorFlow.Core
RenderedTensorFlow.Tensor
renderedNodeDefsTensorFlow.Build
renderedOutputTensorFlow.Tensor
renderValueTensorFlow.Tensor
repeatDatasetTensorFlow.GenOps.Core
repeatDataset'TensorFlow.GenOps.Core
requantizationRangeTensorFlow.GenOps.Core
requantizationRange'TensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
requantize'TensorFlow.GenOps.Core
requestedBytesProto.Tensorflow.Core.Framework.AllocationDescription
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reshape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeArea'TensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBicubic'TensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinear'TensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeBilinearGrad'TensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighbor'TensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resizeNearestNeighborGrad'TensorFlow.GenOps.Core
resourceApplyAdadeltaTensorFlow.GenOps.Core
resourceApplyAdadelta'TensorFlow.GenOps.Core
resourceApplyAdagradTensorFlow.GenOps.Core
resourceApplyAdagrad'TensorFlow.GenOps.Core
resourceApplyAdagradDATensorFlow.GenOps.Core
resourceApplyAdagradDA'TensorFlow.GenOps.Core
resourceApplyAdam 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
resourceApplyAdam' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceApplyFtrlTensorFlow.GenOps.Core
resourceApplyFtrl'TensorFlow.GenOps.Core
resourceApplyFtrlV2TensorFlow.GenOps.Core
resourceApplyFtrlV2'TensorFlow.GenOps.Core
resourceApplyGradientDescentTensorFlow.GenOps.Core
resourceApplyGradientDescent'TensorFlow.GenOps.Core
resourceApplyMomentumTensorFlow.GenOps.Core
resourceApplyMomentum'TensorFlow.GenOps.Core
resourceApplyProximalAdagradTensorFlow.GenOps.Core
resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceApplyRMSPropTensorFlow.GenOps.Core
resourceApplyRMSProp'TensorFlow.GenOps.Core
resourceGatherTensorFlow.GenOps.Core
resourceGather'TensorFlow.GenOps.Core
ResourceHandleTensorFlow.Types, TensorFlow.Core
ResourceHandleProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
2 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
resourceScatterAddTensorFlow.GenOps.Core
resourceScatterAdd'TensorFlow.GenOps.Core
resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
resourceSparseApplyAdagradTensorFlow.GenOps.Core
resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyAdagradDATensorFlow.GenOps.Core
resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceSparseApplyFtrlTensorFlow.GenOps.Core
resourceSparseApplyFtrl'TensorFlow.GenOps.Core
resourceSparseApplyFtrlV2TensorFlow.GenOps.Core
resourceSparseApplyFtrlV2'TensorFlow.GenOps.Core
resourceSparseApplyMomentumTensorFlow.GenOps.Core
resourceSparseApplyMomentum'TensorFlow.GenOps.Core
resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceSparseApplyRMSPropTensorFlow.GenOps.Core
resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
resourceStridedSliceAssignTensorFlow.GenOps.Core
resourceStridedSliceAssign'TensorFlow.GenOps.Core
RESOURCE_EXHAUSTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restore'TensorFlow.GenOps.Core
restoreFromNameTensorFlow.Ops
restoreOpNameProto.Tensorflow.Core.Protobuf.Saver
restoreSliceTensorFlow.GenOps.Core
restoreSlice'TensorFlow.GenOps.Core
restoreV2TensorFlow.GenOps.Core
restoreV2'TensorFlow.GenOps.Core
retProto.Tensorflow.Core.Framework.Function
reverseTensorFlow.GenOps.Core
reverse'TensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseSequence'TensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
reverseV2'TensorFlow.GenOps.Core
rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
RewriterConfig 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.RewriterConfig
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.RewriterConfig
RewriterConfig'HEURISTICSProto.Tensorflow.Core.Protobuf.RewriterConfig
RewriterConfig'MANUALProto.Tensorflow.Core.Protobuf.RewriterConfig
RewriterConfig'MemOptTypeProto.Tensorflow.Core.Protobuf.RewriterConfig
RewriterConfig'NO_MEM_OPTProto.Tensorflow.Core.Protobuf.RewriterConfig
rFFTTensorFlow.GenOps.Core
rFFT'TensorFlow.GenOps.Core
rFFT2DTensorFlow.GenOps.Core
rFFT2D'TensorFlow.GenOps.Core
rFFT3DTensorFlow.GenOps.Core
rFFT3D'TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rGBToHSV'TensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
rint'TensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
round'TensorFlow.GenOps.Core
RPCOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
rpcOptionsProto.Tensorflow.Core.Protobuf.Config
rsqrtTensorFlow.GenOps.Core
rsqrt'TensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
rsqrtGrad'TensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session, TensorFlow.Core
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunConfiguration 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
runConfigurationProto.Tensorflow.Core.Util.TestLog
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
runMetadataProto.Tensorflow.Core.Util.Event
runModeProto.Tensorflow.Core.Util.TestLog
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runRefTensorFlow.Tensor
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runTimeProto.Tensorflow.Core.Util.TestLog
runValueTensorFlow.Tensor
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-S.html b/docs/haddock/doc-index-S.html index 5d4d4d9..adca720 100644 --- a/docs/haddock/doc-index-S.html +++ b/docs/haddock/doc-index-S.html @@ -1,4 +1,4 @@ - (Index - S)

 

Index - S

sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
sampleDistortedBoundingBox'TensorFlow.GenOps.Core
sampleRateProto.Tensorflow.Core.Framework.Summary
save 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
save'TensorFlow.GenOps.Core
saveSlicesTensorFlow.GenOps.Core
saveSlices'TensorFlow.GenOps.Core
saveV2TensorFlow.GenOps.Core
saveV2'TensorFlow.GenOps.Core
Scalar 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
scalarTensorFlow.Ops
scalar'TensorFlow.Ops
scalarizeTensorFlow.Ops
scalarSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
scalarSummary'TensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterAdd'TensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterDiv'TensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterMul'TensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNd'TensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdAdd'TensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdSub'TensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterNdUpdate'TensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterSub'TensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scatterUpdate'TensorFlow.GenOps.Core
scomplexValProto.Tensorflow.Core.Framework.Tensor
sdcaFprintTensorFlow.GenOps.Core
sdcaFprint'TensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaOptimizer'TensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
sdcaShrinkL1'TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMax'TensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMean'TensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentMin'TensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentProd'TensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
segmentSum'TensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
select'TensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEig'TensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
selfAdjointEigV2'TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeManySparse'TensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
serializeSparse'TensorFlow.GenOps.Core
Session 
1 (Type/Class)TensorFlow.Session, TensorFlow.Core
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
SessionLog 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
sessionLogProto.Tensorflow.Core.Util.Event
SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
SessionLog'STARTProto.Tensorflow.Core.Util.Event
SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
SessionLog'STOPProto.Tensorflow.Core.Util.Event
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
setSizeTensorFlow.GenOps.Core
setSize'TensorFlow.GenOps.Core
Shape 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
shape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
shapeNTensorFlow.GenOps.Core
shapeN'TensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilename'TensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
shardedFilespec'TensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoid'TensorFlow.GenOps.Core
sigmoidCrossEntropyWithLogitsTensorFlow.NN
sigmoidGradTensorFlow.GenOps.Core
sigmoidGrad'TensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
SimpleArgTensorFlow.OpGen.ParsedOp
simpleValueProto.Tensorflow.Core.Framework.Summary
sinTensorFlow.GenOps.Core
sin'TensorFlow.GenOps.Core
sinkTFRecordsTensorFlow.Records.Conduit
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.TensorShape
size' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
skipgramTensorFlow.GenOps.Core
skipgram'TensorFlow.GenOps.Core
sliceTensorFlow.GenOps.Core
slice'TensorFlow.GenOps.Core
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplus'TensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softplusGrad'TensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsign'TensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
softsignGrad'TensorFlow.GenOps.Core
sourceTFRecordsTensorFlow.Records.Conduit
spaceToBatchTensorFlow.GenOps.Core
spaceToBatch'TensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToBatchND'TensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
spaceToDepth'TensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAdd'TensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseAddGrad'TensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdadelta'TensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagrad'TensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyAdagradDA'TensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyFtrl'TensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyMomentum'TensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseApplyRMSProp'TensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseConcat'TensorFlow.GenOps.Core
sparseConditionalAccumulatorTensorFlow.GenOps.Core
sparseConditionalAccumulator'TensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseAdd'TensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseDiv'TensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseDenseCwiseMul'TensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseMatMul'TensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSum'TensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReduceSumSparse'TensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReorder'TensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseReshape'TensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMean'TensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentMeanGrad'TensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtN'TensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSegmentSum'TensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmax'TensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMaximum'TensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSparseMinimum'TensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseSplit'TensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseAdd'TensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseTensorDenseMatMul'TensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToDense' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToSparseSetOperationTensorFlow.GenOps.Core
sparseToSparseSetOperation'TensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
split'TensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
splitV'TensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrt'TensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
sqrtGrad'TensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
square'TensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squaredDifference'TensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
squeeze'TensorFlow.GenOps.Core
stackTensorFlow.GenOps.Core
stack'TensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackClose'TensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPop'TensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stackPush'TensorFlow.GenOps.Core
stageTensorFlow.GenOps.Core
stage'TensorFlow.GenOps.Core
statusProto.Tensorflow.Core.Util.Event
stepProto.Tensorflow.Core.Util.Event
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stopGradient'TensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSlice'TensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceAssign'TensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stridedSliceGrad'TensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringJoin'TensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringSplit'TensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucket'TensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketFast'TensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToHashBucketStrong'TensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringToNumber'TensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sub' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
substrTensorFlow.GenOps.Core
substr'TensorFlow.GenOps.Core
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.Summary
sum' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summariesTensorFlow.Build
Summary 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
summary 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Util.Event
Summary'Audio 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Image 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Value 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryTensor 
1 (Type/Class)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Logging
sumSquaresProto.Tensorflow.Core.Framework.Summary
svdTensorFlow.GenOps.Core
svd'TensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
switch'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - S

sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
sampleDistortedBoundingBox'TensorFlow.GenOps.Core
sampleDistortedBoundingBoxV2TensorFlow.GenOps.Core
sampleDistortedBoundingBoxV2'TensorFlow.GenOps.Core
sampleRateProto.Tensorflow.Core.Framework.Summary
save 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
save'TensorFlow.GenOps.Core
SavedModel 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.SavedModel
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.SavedModel
savedModelSchemaVersionProto.Tensorflow.Core.Protobuf.SavedModel
SavedSlice 
1 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
SavedSliceMeta 
1 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
SavedTensorSliceMeta 
1 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
SavedTensorSlices 
1 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
SaverDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Saver
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Saver
saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
SaverDef'CheckpointFormatVersionProto.Tensorflow.Core.Protobuf.Saver
SaverDef'LEGACYProto.Tensorflow.Core.Protobuf.Saver
SaverDef'V1Proto.Tensorflow.Core.Protobuf.Saver
SaverDef'V2Proto.Tensorflow.Core.Protobuf.Saver
SaveSliceInfoDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Variable
2 (Type/Class)Proto.Tensorflow.Core.Framework.Variable
saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
saveSlicesTensorFlow.GenOps.Core
saveSlices'TensorFlow.GenOps.Core
saveTensorNameProto.Tensorflow.Core.Protobuf.Saver
saveV2TensorFlow.GenOps.Core
saveV2'TensorFlow.GenOps.Core
Scalar 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
scalarTensorFlow.Ops
scalar'TensorFlow.Ops
scalarizeTensorFlow.Ops
scalarSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
scalarSummary'TensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterAdd'TensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterDiv'TensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterMul'TensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNd'TensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdAdd'TensorFlow.GenOps.Core
scatterNdNonAliasingAddTensorFlow.GenOps.Core
scatterNdNonAliasingAdd'TensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdSub'TensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterNdUpdate'TensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterSub'TensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scatterUpdate'TensorFlow.GenOps.Core
scheduledMicrosProto.Tensorflow.Core.Framework.StepStats
scomplexValProto.Tensorflow.Core.Framework.Tensor
sdcaFprintTensorFlow.GenOps.Core
sdcaFprint'TensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaOptimizer'TensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
sdcaShrinkL1'TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMax'TensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMean'TensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentMin'TensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentProd'TensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
segmentSum'TensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
select'TensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEig'TensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
selfAdjointEigV2'TensorFlow.GenOps.Core
SequenceExample 
1 (Data Constructor)Proto.Tensorflow.Core.Example.Example
2 (Type/Class)Proto.Tensorflow.Core.Example.Example
serialIdentifierProto.Tensorflow.Core.Util.TestLog
serializeManySparseTensorFlow.GenOps.Core
serializeManySparse'TensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
serializeSparse'TensorFlow.GenOps.Core
ServerDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorflowServer
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorflowServer
Session 
1 (Type/Class)TensorFlow.Session, TensorFlow.Core
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
SessionLog 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
sessionLogProto.Tensorflow.Core.Util.Event
SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
SessionLog'STARTProto.Tensorflow.Core.Util.Event
SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
SessionLog'STOPProto.Tensorflow.Core.Util.Event
SessionTTensorFlow.Session
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
setSizeTensorFlow.GenOps.Core
setSize'TensorFlow.GenOps.Core
Shape 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
4 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
5 (Function)Proto.Tensorflow.Core.Framework.AttrValue
6 (Function)Proto.Tensorflow.Core.Framework.CostGraph
7 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
8 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
shape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
shapeNTensorFlow.GenOps.Core
shapeN'TensorFlow.GenOps.Core
shapesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
shardedProto.Tensorflow.Core.Protobuf.Saver
shardedFilenameTensorFlow.GenOps.Core
shardedFilename'TensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
shardedFilespec'TensorFlow.GenOps.Core
shardIdProto.Tensorflow.Core.Protobuf.TensorBundle
shuffleDatasetTensorFlow.GenOps.Core
shuffleDataset'TensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoid'TensorFlow.GenOps.Core
sigmoidCrossEntropyWithLogitsTensorFlow.NN
sigmoidGradTensorFlow.GenOps.Core
sigmoidGrad'TensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
signatureProto.Tensorflow.Core.Framework.Function
SignatureDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
signatureDefProto.Tensorflow.Core.Protobuf.MetaGraph
SignatureDef'InputsEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
SignatureDef'OutputsEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
SimpleArgTensorFlow.OpGen.ParsedOp
simpleValueProto.Tensorflow.Core.Framework.Summary
sinTensorFlow.GenOps.Core
sin'TensorFlow.GenOps.Core
sinhTensorFlow.GenOps.Core
sinh'TensorFlow.GenOps.Core
sinkTFRecordsTensorFlow.Records.Conduit
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
4 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
5 (Function)Proto.Tensorflow.Core.Framework.TensorShape
size' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
skipDatasetTensorFlow.GenOps.Core
skipDataset'TensorFlow.GenOps.Core
skipgramTensorFlow.GenOps.Core
skipgram'TensorFlow.GenOps.Core
slice 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
slice'TensorFlow.GenOps.Core
slicesProto.Tensorflow.Core.Protobuf.TensorBundle
slotProto.Tensorflow.Core.Framework.StepStats
snapshotProto.Tensorflow.Core.Util.TestLog
snapshotNameProto.Tensorflow.Core.Framework.Variable
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplus'TensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softplusGrad'TensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsign'TensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
softsignGrad'TensorFlow.GenOps.Core
sourceTFRecordsTensorFlow.Records.Conduit
spaceToBatchTensorFlow.GenOps.Core
spaceToBatch'TensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToBatchND'TensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
spaceToDepth'TensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAdd'TensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseAddGrad'TensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdadelta'TensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagrad'TensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyAdagradDA'TensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyFtrl'TensorFlow.GenOps.Core
sparseApplyFtrlV2TensorFlow.GenOps.Core
sparseApplyFtrlV2'TensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyMomentum'TensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseApplyRMSProp'TensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseConcat'TensorFlow.GenOps.Core
sparseConditionalAccumulatorTensorFlow.GenOps.Core
sparseConditionalAccumulator'TensorFlow.GenOps.Core
sparseCrossTensorFlow.GenOps.Core
sparseCross'TensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseAdd'TensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseDiv'TensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseDenseCwiseMul'TensorFlow.GenOps.Core
sparseFillEmptyRowsTensorFlow.GenOps.Core
sparseFillEmptyRows'TensorFlow.GenOps.Core
sparseFillEmptyRowsGradTensorFlow.GenOps.Core
sparseFillEmptyRowsGrad'TensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseMatMul'TensorFlow.GenOps.Core
sparseReduceMaxTensorFlow.GenOps.Core
sparseReduceMax'TensorFlow.GenOps.Core
sparseReduceMaxSparseTensorFlow.GenOps.Core
sparseReduceMaxSparse'TensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSum'TensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReduceSumSparse'TensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReorder'TensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseReshape'TensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMean'TensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentMeanGrad'TensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtN'TensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSegmentSum'TensorFlow.GenOps.Core
sparseSliceTensorFlow.GenOps.Core
sparseSlice'TensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmax'TensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMaximum'TensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSparseMinimum'TensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseSplit'TensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseAdd'TensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseTensorDenseMatMul'TensorFlow.GenOps.Core
sparseTensorSliceDatasetTensorFlow.GenOps.Core
sparseTensorSliceDataset'TensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToDense' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToSparseSetOperationTensorFlow.GenOps.Core
sparseToSparseSetOperation'TensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
split'TensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
splitV'TensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrt'TensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
sqrtGrad'TensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
square'TensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squaredDifference'TensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
squeeze'TensorFlow.GenOps.Core
stackTensorFlow.GenOps.Core
stack'TensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackClose'TensorFlow.GenOps.Core
stackCloseV2TensorFlow.GenOps.Core
stackCloseV2'TensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPop'TensorFlow.GenOps.Core
stackPopV2TensorFlow.GenOps.Core
stackPopV2'TensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stackPush'TensorFlow.GenOps.Core
stackPushV2TensorFlow.GenOps.Core
stackPushV2'TensorFlow.GenOps.Core
stackV2TensorFlow.GenOps.Core
stackV2'TensorFlow.GenOps.Core
stageTensorFlow.GenOps.Core
stage'TensorFlow.GenOps.Core
stageClearTensorFlow.GenOps.Core
stageClear'TensorFlow.GenOps.Core
stagePeekTensorFlow.GenOps.Core
stagePeek'TensorFlow.GenOps.Core
stageSizeTensorFlow.GenOps.Core
stageSize'TensorFlow.GenOps.Core
startProto.Tensorflow.Core.Framework.TensorSlice
startTimeProto.Tensorflow.Core.Util.TestLog
statelessRandomNormalTensorFlow.GenOps.Core
statelessRandomNormal'TensorFlow.GenOps.Core
statelessRandomUniformTensorFlow.GenOps.Core
statelessRandomUniform'TensorFlow.GenOps.Core
statelessTruncatedNormalTensorFlow.GenOps.Core
statelessTruncatedNormal'TensorFlow.GenOps.Core
statusProto.Tensorflow.Core.Util.Event
stepProto.Tensorflow.Core.Util.Event
stepIdProto.Tensorflow.Core.Framework.LogMemory
StepStats 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
2 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stopGradient'TensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSlice'TensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceAssign'TensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stridedSliceGrad'TensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringJoin'TensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringSplit'TensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucket'TensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketFast'TensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToHashBucketStrong'TensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringToNumber'TensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
stringValueProto.Tensorflow.Core.Util.TestLog
strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sub' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
substrTensorFlow.GenOps.Core
substr'TensorFlow.GenOps.Core
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.Summary
sum' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summariesTensorFlow.Build
Summary 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
summary 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Util.Event
Summary'Audio 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Image 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Value 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Value'AudioProto.Tensorflow.Core.Framework.Summary
Summary'Value'HistoProto.Tensorflow.Core.Framework.Summary
Summary'Value'ImageProto.Tensorflow.Core.Framework.Summary
Summary'Value'ObsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
Summary'Value'SimpleValueProto.Tensorflow.Core.Framework.Summary
Summary'Value'TensorProto.Tensorflow.Core.Framework.Summary
Summary'Value'ValueProto.Tensorflow.Core.Framework.Summary
SummaryDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
summaryDescriptionProto.Tensorflow.Core.Framework.Summary
SummaryMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryMetadata'PluginData 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryTensor 
1 (Type/Class)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Logging
sumSquaresProto.Tensorflow.Core.Framework.Summary
svdTensorFlow.GenOps.Core
svd'TensorFlow.GenOps.Core
swapMemoryProto.Tensorflow.Core.Protobuf.ControlFlow
switchTensorFlow.GenOps.Core
switch'TensorFlow.GenOps.Core
systemProto.Tensorflow.Core.Util.TestLog
\ No newline at end of file diff --git a/docs/haddock/doc-index-T.html b/docs/haddock/doc-index-T.html index a70b346..c8fc6ba 100644 --- a/docs/haddock/doc-index-T.html +++ b/docs/haddock/doc-index-T.html @@ -1,4 +1,4 @@ - (Index - T)

 

Index - T

tag 
1 (Function)Proto.Tensorflow.Core.Util.Event
2 (Function)Proto.Tensorflow.Core.Framework.Summary
TaggedRunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
taggedRunMetadataProto.Tensorflow.Core.Util.Event
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tan'TensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanh'TensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
tanhGrad'TensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
temporaryVariable'TensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
tensor 
1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
2 (Function)Proto.Tensorflow.Core.Framework.Summary
tensorArrayTensorFlow.GenOps.Core
tensorArray'TensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayClose'TensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayCloseV2'TensorFlow.GenOps.Core
tensorArrayCloseV3TensorFlow.GenOps.Core
tensorArrayCloseV3'TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcat'TensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayConcatV2'TensorFlow.GenOps.Core
tensorArrayConcatV3TensorFlow.GenOps.Core
tensorArrayConcatV3'TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGather'TensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGatherV2'TensorFlow.GenOps.Core
tensorArrayGatherV3TensorFlow.GenOps.Core
tensorArrayGatherV3'TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGrad'TensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayGradV2'TensorFlow.GenOps.Core
tensorArrayGradV3TensorFlow.GenOps.Core
tensorArrayGradV3'TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayPack'TensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayRead'TensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayReadV2'TensorFlow.GenOps.Core
tensorArrayReadV3TensorFlow.GenOps.Core
tensorArrayReadV3'TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatter'TensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArrayScatterV2'TensorFlow.GenOps.Core
tensorArrayScatterV3TensorFlow.GenOps.Core
tensorArrayScatterV3'TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySize'TensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySizeV2'TensorFlow.GenOps.Core
tensorArraySizeV3TensorFlow.GenOps.Core
tensorArraySizeV3'TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplit'TensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArraySplitV2'TensorFlow.GenOps.Core
tensorArraySplitV3TensorFlow.GenOps.Core
tensorArraySplitV3'TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayUnpack'TensorFlow.GenOps.Core
tensorArrayV2TensorFlow.GenOps.Core
tensorArrayV2'TensorFlow.GenOps.Core
tensorArrayV3TensorFlow.GenOps.Core
tensorArrayV3'TensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWrite'TensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorArrayWriteV2'TensorFlow.GenOps.Core
tensorArrayWriteV3TensorFlow.GenOps.Core
tensorArrayWriteV3'TensorFlow.GenOps.Core
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
tensorShapeProto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSummaryTensorFlow.GenOps.Core
tensorSummary'TensorFlow.GenOps.Core
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
textLineReaderTensorFlow.GenOps.Core
textLineReader'TensorFlow.GenOps.Core
textLineReaderV2TensorFlow.GenOps.Core
textLineReaderV2'TensorFlow.GenOps.Core
TFName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
tfNameTensorFlow.OpGen.ParsedOp
tFRecordReaderTensorFlow.GenOps.Core
tFRecordReader'TensorFlow.GenOps.Core
tFRecordReaderV2TensorFlow.GenOps.Core
tFRecordReaderV2'TensorFlow.GenOps.Core
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tile'TensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
tileGrad'TensorFlow.GenOps.Core
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
toBuildTensorFlow.Tensor
topKTensorFlow.GenOps.Core
topK'TensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
topKV2'TensorFlow.GenOps.Core
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
transpose' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateDivTensorFlow.GenOps.Core
truncateDiv'TensorFlow.GenOps.Core
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncatedNormal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateModTensorFlow.GenOps.Core
truncateMod'TensorFlow.GenOps.Core
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeHintProto.Tensorflow.Core.Framework.Summary
typeListAttrProto.Tensorflow.Core.Framework.OpDef
TypeParam 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
typeParamIsListTensorFlow.OpGen.ParsedOp
typeParamRestrictionsTensorFlow.OpGen.ParsedOp
\ No newline at end of file +

 

Index - T

tag 
1 (Function)Proto.Tensorflow.Core.Util.Event
2 (Function)Proto.Tensorflow.Core.Framework.Summary
TaggedRunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
taggedRunMetadataProto.Tensorflow.Core.Util.Event
tagsProto.Tensorflow.Core.Protobuf.MetaGraph
takeDatasetTensorFlow.GenOps.Core
takeDataset'TensorFlow.GenOps.Core
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tan'TensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanh'TensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
tanhGrad'TensorFlow.GenOps.Core
targetProto.Tensorflow.Core.Util.TestLog
taskIndexProto.Tensorflow.Core.Protobuf.TensorflowServer
tasksProto.Tensorflow.Core.Protobuf.Cluster
temporaryMemorySizeProto.Tensorflow.Core.Framework.CostGraph
temporaryVariableTensorFlow.GenOps.Core
temporaryVariable'TensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
tensor 
1 (Function)Proto.Tensorflow.Core.Framework.LogMemory
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
3 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
4 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
5 (Function)Proto.Tensorflow.Core.Framework.Summary
tensorArrayTensorFlow.GenOps.Core
tensorArray'TensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayClose'TensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayCloseV2'TensorFlow.GenOps.Core
tensorArrayCloseV3TensorFlow.GenOps.Core
tensorArrayCloseV3'TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcat'TensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayConcatV2'TensorFlow.GenOps.Core
tensorArrayConcatV3TensorFlow.GenOps.Core
tensorArrayConcatV3'TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGather'TensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGatherV2'TensorFlow.GenOps.Core
tensorArrayGatherV3TensorFlow.GenOps.Core
tensorArrayGatherV3'TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGrad'TensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayGradV2'TensorFlow.GenOps.Core
tensorArrayGradV3TensorFlow.GenOps.Core
tensorArrayGradV3'TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayPack'TensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayRead'TensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayReadV2'TensorFlow.GenOps.Core
tensorArrayReadV3TensorFlow.GenOps.Core
tensorArrayReadV3'TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatter'TensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArrayScatterV2'TensorFlow.GenOps.Core
tensorArrayScatterV3TensorFlow.GenOps.Core
tensorArrayScatterV3'TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySize'TensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySizeV2'TensorFlow.GenOps.Core
tensorArraySizeV3TensorFlow.GenOps.Core
tensorArraySizeV3'TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplit'TensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArraySplitV2'TensorFlow.GenOps.Core
tensorArraySplitV3TensorFlow.GenOps.Core
tensorArraySplitV3'TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayUnpack'TensorFlow.GenOps.Core
tensorArrayV2TensorFlow.GenOps.Core
tensorArrayV2'TensorFlow.GenOps.Core
tensorArrayV3TensorFlow.GenOps.Core
tensorArrayV3'TensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWrite'TensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorArrayWriteV2'TensorFlow.GenOps.Core
tensorArrayWriteV3TensorFlow.GenOps.Core
tensorArrayWriteV3'TensorFlow.GenOps.Core
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
tensorDatasetTensorFlow.GenOps.Core
tensorDataset'TensorFlow.GenOps.Core
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorDescription
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorDescription
tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorflowGitVersionProto.Tensorflow.Core.Protobuf.MetaGraph
tensorflowVersionProto.Tensorflow.Core.Protobuf.MetaGraph
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorInfo 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
TensorInfo'CooSparse 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
TensorInfo'CooSparse'Proto.Tensorflow.Core.Protobuf.MetaGraph
TensorInfo'EncodingProto.Tensorflow.Core.Protobuf.MetaGraph
TensorInfo'NameProto.Tensorflow.Core.Protobuf.MetaGraph
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
tensorShape 
1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
2 (Function)Proto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSliceDatasetTensorFlow.GenOps.Core
tensorSliceDataset'TensorFlow.GenOps.Core
TensorSliceProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorSlice
TensorSliceProto'Extent 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorSlice
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorSlice
TensorSliceProto'Extent'HasLengthProto.Tensorflow.Core.Framework.TensorSlice
TensorSliceProto'Extent'LengthProto.Tensorflow.Core.Framework.TensorSlice
tensorSummaryTensorFlow.GenOps.Core
tensorSummary'TensorFlow.GenOps.Core
tensorSummaryV2TensorFlow.GenOps.Core
tensorSummaryV2'TensorFlow.GenOps.Core
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
TestResults 
1 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
2 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
TestResults'ANDROID_BENCHMARKProto.Tensorflow.Core.Util.TestLog
TestResults'BenchmarkTypeProto.Tensorflow.Core.Util.TestLog
TestResults'CPP_MICROBENCHMARKProto.Tensorflow.Core.Util.TestLog
TestResults'PYTHON_BENCHMARKProto.Tensorflow.Core.Util.TestLog
TestResults'UNKNOWNProto.Tensorflow.Core.Util.TestLog
textLineDatasetTensorFlow.GenOps.Core
textLineDataset'TensorFlow.GenOps.Core
textLineReaderTensorFlow.GenOps.Core
textLineReader'TensorFlow.GenOps.Core
textLineReaderV2TensorFlow.GenOps.Core
textLineReaderV2'TensorFlow.GenOps.Core
TFName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
tfNameTensorFlow.OpGen.ParsedOp
tFRecordDatasetTensorFlow.GenOps.Core
tFRecordDataset'TensorFlow.GenOps.Core
tFRecordReaderTensorFlow.GenOps.Core
tFRecordReader'TensorFlow.GenOps.Core
tFRecordReaderV2TensorFlow.GenOps.Core
tFRecordReaderV2'TensorFlow.GenOps.Core
threadIdProto.Tensorflow.Core.Framework.StepStats
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
throughputProto.Tensorflow.Core.Util.TestLog
tileTensorFlow.GenOps.Core
tile'TensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
tileGrad'TensorFlow.GenOps.Core
timelineLabelProto.Tensorflow.Core.Framework.StepStats
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
toBuildTensorFlow.Tensor
tolerateDebugOpCreationFailuresProto.Tensorflow.Core.Protobuf.Debug
topKTensorFlow.GenOps.Core
topK'TensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
topKV2'TensorFlow.GenOps.Core
totalProto.Tensorflow.Core.Util.TestLog
totalBytesProto.Tensorflow.Core.Framework.StepStats
ToTensorTensorFlow.Tensor
toTensorTensorFlow.Tensor
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
transpose' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateDivTensorFlow.GenOps.Core
truncateDiv'TensorFlow.GenOps.Core
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncatedNormal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateModTensorFlow.GenOps.Core
truncateMod'TensorFlow.GenOps.Core
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
3 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
4 (Function)Proto.Tensorflow.Core.Util.TestLog
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeHintProto.Tensorflow.Core.Framework.Summary
typeListAttrProto.Tensorflow.Core.Framework.OpDef
TypeParam 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
typeParamIsListTensorFlow.OpGen.ParsedOp
typeParamRestrictionsTensorFlow.OpGen.ParsedOp
\ No newline at end of file diff --git a/docs/haddock/doc-index-U.html b/docs/haddock/doc-index-U.html index ef799fe..b4bfad2 100644 --- a/docs/haddock/doc-index-U.html +++ b/docs/haddock/doc-index-U.html @@ -1,4 +1,4 @@ - (Index - U)

 

\ No newline at end of file +

 

\ No newline at end of file diff --git a/docs/haddock/doc-index-V.html b/docs/haddock/doc-index-V.html index 6c813c5..7f0e698 100644 --- a/docs/haddock/doc-index-V.html +++ b/docs/haddock/doc-index-V.html @@ -1,4 +1,4 @@ - (Index - V)

 

\ No newline at end of file +

 

Index - V

Value 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
value 
1 (Function)TensorFlow.Tensor, TensorFlow.Core
2 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
3 (Function)Proto.Tensorflow.Core.Example.Feature
4 (Function)Proto.Tensorflow.Core.Protobuf.Config
5 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
6 (Function)Proto.Tensorflow.Core.Framework.Function
7 (Function)Proto.Tensorflow.Core.Framework.NodeDef
8 (Function)Proto.Tensorflow.Core.Framework.AttrValue
9 (Function)Proto.Tensorflow.Core.Framework.Summary
10 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
11 (Function)Proto.Tensorflow.Core.Protobuf.ControlFlow
12 (Function)Proto.Tensorflow.Core.Util.TestLog
valuesProto.Tensorflow.Core.Protobuf.ControlFlow
ValuesDef 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
ValuesDef'ExternalValuesEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
valuesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
varHandleOpTensorFlow.GenOps.Core
varHandleOp'TensorFlow.GenOps.Core
VariableTensorFlow.Variable
variable 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
3 (Function)TensorFlow.Ops
variable' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Variable
3 (Function)TensorFlow.Ops
VariableDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Variable
2 (Type/Class)Proto.Tensorflow.Core.Framework.Variable
variableNameProto.Tensorflow.Core.Framework.Variable
variableV2TensorFlow.GenOps.Core
variableV2'TensorFlow.GenOps.Core
varIsInitializedOpTensorFlow.GenOps.Core
varIsInitializedOp'TensorFlow.GenOps.Core
varLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
VarLenFeatureProto 
1 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
2 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
varOffsetProto.Tensorflow.Core.Framework.Variable
varShapeProto.Tensorflow.Core.Framework.Variable
vectorTensorFlow.Ops
vector'TensorFlow.Ops
version 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
3 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
4 (Function)Proto.Tensorflow.Core.Protobuf.Saver
5 (Function)Proto.Tensorflow.Core.Util.TestLog
VersionDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Versions
2 (Type/Class)Proto.Tensorflow.Core.Framework.Versions
versionNumberProto.Tensorflow.Core.Framework.Tensor
versions 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file diff --git a/docs/haddock/doc-index-W.html b/docs/haddock/doc-index-W.html index b404456..b99aba2 100644 --- a/docs/haddock/doc-index-W.html +++ b/docs/haddock/doc-index-W.html @@ -1,4 +1,4 @@ - (Index - W)

 

\ No newline at end of file +

 

\ No newline at end of file diff --git a/docs/haddock/doc-index-Z.html b/docs/haddock/doc-index-Z.html index 1626417..1cd0197 100644 --- a/docs/haddock/doc-index-Z.html +++ b/docs/haddock/doc-index-Z.html @@ -1,4 +1,4 @@ - (Index - Z)

 

Index - Z

zeroInitializedVariableTensorFlow.Ops
zeroInitializedVariable'TensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zerosLike' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
zeta'TensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - Z

zeroInitializedVariable 
1 (Function)TensorFlow.Variable
2 (Function)TensorFlow.Ops
zeroInitializedVariable' 
1 (Function)TensorFlow.Variable
2 (Function)TensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zerosLike' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
zeta'TensorFlow.GenOps.Core
zipDatasetTensorFlow.GenOps.Core
zipDataset'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index.html b/docs/haddock/doc-index.html index bfab859..22f168e 100644 --- a/docs/haddock/doc-index.html +++ b/docs/haddock/doc-index.html @@ -1,4 +1,4 @@ - (Index)

 

\ No newline at end of file +

 

\ No newline at end of file diff --git a/docs/haddock/frames.html b/docs/haddock/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/haddock-util.js b/docs/haddock/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/haddock-util.js +++ b/docs/haddock/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? -1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. 
parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/index-frames.html b/docs/haddock/index-frames.html deleted file mode 100644 index 1058a34..0000000 --- a/docs/haddock/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -

Modules

\ No newline at end of file diff --git a/docs/haddock/index.html b/docs/haddock/index.html index 02ab365..21a997d 100644 --- a/docs/haddock/index.html +++ b/docs/haddock/index.html @@ -1,4 +1,4 @@ -

 

\ No newline at end of file +

 

Modules

\ No newline at end of file diff --git a/docs/haddock/ocean.css b/docs/haddock/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/ocean.css +++ b/docs/haddock/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ -424,30 +435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } 
-.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html deleted file mode 100644 index 04a5c62..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html +++ /dev/null @@ -1,14 +0,0 @@ -TensorFlow.Build

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Build

Graph node types

newtype ControlNode

A type of graph node which has no outputs. These nodes are - valuable for causing side effects when they are run.

Constructors

ControlNode 

data Unique

Instances

Ops

opAttr :: Attribute a => Text -> Lens' OpDef a

opInputs :: Lens' OpDef [Output]

The Build monad

data GraphState

Instances

Monad m => MonadState GraphState (BuildT m) 

data BuildT m a

An action for building nodes in a TensorFlow graph. - Used to manage build state internally as part of the Session monad.

Instances

MonadTrans BuildT 
TensorKind Build 
Monad m => MonadState GraphState (BuildT m) 
Monad m => Monad (BuildT m) 
Functor m => Functor (BuildT m) 
Monad m => Applicative (BuildT m) 
MonadIO m => MonadIO (BuildT m) 
MonadThrow m => MonadThrow (BuildT m) 
MonadMask m => MonadMask (BuildT m) 
MonadCatch m => MonadCatch (BuildT m) 
Monad m => MonadBuild (BuildT m) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 

type Build = BuildT Identity

An action for building nodes in a TensorFlow graph.

class Monad m => MonadBuild m where

Lift a Build action into a monad, including any explicit op renderings.

Methods

build :: Build a -> m a

addInitializer :: MonadBuild m => ControlNode -> m ()

Registers the given node to be executed before the next - run.

hoistBuildT :: (forall a. m a -> n a) -> BuildT m b -> BuildT n b

This is Control.Monad.Morph.hoist sans the dependency.

evalBuildT :: Monad m => BuildT m a -> m a

runBuildT :: BuildT m a -> m (a, GraphState)

asGraphDef :: Build a -> GraphDef

Produce a GraphDef proto representation of the nodes that are rendered in - the given Build action.

flushInitializers :: Monad m => BuildT m [NodeName]

Get all the initializers that have accumulated so far, and clear - that buffer.

flushNodeBuffer :: MonadBuild m => m [NodeDef]

Get all the NodeDefs that have accumulated so far, and clear that buffer.

Creating and looking up Ops

getOrAddOp :: OpDef -> Build NodeName

Render the given op if it hasn't been rendered already, and return its - name.

addNewOp :: OpDef -> Build NodeName

Add a new node for a given OpDef. This is used for making "stateful" ops - which are not safe to dedup (e.g, "variable" and "assign").

encodeOutput :: Output -> Text

Turn an Output into a string representation for the TensorFlow - foreign APIs.

Modifying all nodes in a Build action

withStateLens :: MonadBuild m => Lens' GraphState a -> (a -> a) -> m b -> m b

Modify some part of the state, run an action, and restore the state - after that action is done.

withDevice :: MonadBuild m => Maybe Device -> m a -> m a

Set a device for all nodes rendered in the given Build action - (unless further overridden by another use of withDevice).

withNameScope :: MonadBuild m => Text -> m a -> m a

Prepend a scope to all nodes rendered in the given Build action.

withNodeDependencies :: MonadBuild m => Set NodeName -> m a -> m a

Add control inputs to all nodes rendered in the given Build action.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html deleted file mode 100644 index 674fe17..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html +++ /dev/null @@ -1,6 +0,0 @@ -TensorFlow.BuildOp

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.BuildOp

Synopsis

Documentation

class BuildResult a where

Class of types that can be used as op outputs.

Methods

buildResult :: Result a

Instances

BuildResult ResourceHandle 
BuildResult ControlNode 
BuildResult a => BuildResult [a] 
(BuildResult a1, BuildResult a2) => BuildResult (a1, a2) 
(Rendered v, TensorTypes as) => BuildResult (TensorList v as) 
Rendered v => BuildResult (Tensor v a) 
(BuildResult a1, BuildResult a2, BuildResult a3) => BuildResult (a1, a2, a3) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4) => BuildResult (a1, a2, a3, a4) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5) => BuildResult (a1, a2, a3, a4, a5) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6) => BuildResult (a1, a2, a3, a4, a5, a6) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6, BuildResult a7) => BuildResult (a1, a2, a3, a4, a5, a6, a7) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6, BuildResult a7, BuildResult a8) => BuildResult (a1, a2, a3, a4, a5, a6, a7, a8) 

buildOp :: BuildResult a => [Int64] -> OpDef -> Build a

class PureResult a where

Class of types that can be used as op outputs.

Methods

pureResult :: ReaderT (Build OpDef) (State ResultState) a

Instances

PureResult a => PureResult [a] 
(PureResult a1, PureResult a2) => PureResult (a1, a2) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 
(PureResult a1, PureResult a2, PureResult a3) => PureResult (a1, a2, a3) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4) => PureResult (a1, a2, a3, a4) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5) => PureResult (a1, a2, a3, a4, a5) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6) => PureResult (a1, a2, a3, a4, a5, a6) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6, PureResult a7) => PureResult (a1, a2, a3, a4, a5, a6, a7) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6, PureResult a7, PureResult a8) => PureResult (a1, a2, a3, a4, a5, a6, a7, a8) 

pureOp :: PureResult a => [Int64] -> Build OpDef -> a

eqLengthGuard :: [(String, [(String, Int)])] -> Bool

Returns true if all the integers in each tuple are identical. - Throws an error with a descriptive message if not.

type OpParams = OpDef -> OpDef

Parameters to build an op (for example, the node name or optional attributes). - TODO: be more type safe.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html deleted file mode 100644 index dbf1957..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html +++ /dev/null @@ -1,6 +0,0 @@ -TensorFlow.ControlFlow

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.ControlFlow

Synopsis

Dependencies

withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a

Modify a Build action, such that all new ops rendered in it will depend - on the nodes in the first argument.

group :: (MonadBuild m, Nodes t) => t -> m ControlNode

Create an op that groups multiple operations.

When this op finishes, all ops in the input n have finished. This op has - no output.

Operations

noOp :: MonadBuild m => m ControlNode

Does nothing. Only useful as a placeholder for control edges.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Core.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Core.html deleted file mode 100644 index a3079ba..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Core.html +++ /dev/null @@ -1,49 +0,0 @@ -TensorFlow.Core

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Core

Description

The core functionality of TensorFlow.

Unless you are defining ops, you do not need to import other modules from - this package.

Basic ops are provided in the tensorflow-ops and tensorflow-core-ops - packages.

Synopsis

Session

data Session a

data Options

Customization for session. Use the lenses to update: - sessionTarget, sessionTracer, sessionConfig.

Instances

Default Options 

sessionConfig :: Lens' Options ConfigProto

Uses the specified config for the created session.

sessionTarget :: Lens' Options ByteString

Target can be: "local", ip:port, host:port. - The set of supported factories depends on the linked in libraries.

sessionTracer :: Lens' Options Tracer

Uses the given logger to monitor session progress.

runSession :: Session a -> IO a

Run Session actions in a new TensorFlow session.

runSessionWithOptions :: Options -> Session a -> IO a

Run Session actions in a new TensorFlow session created with - the given option setter actions (sessionTarget, sessionConfig).

Building graphs

class Monad m => MonadBuild m where

Lift a Build action into a monad, including any explicit op renderings.

Methods

build :: Build a -> m a

Running graphs

class Nodes t => Fetchable t a

Types that tensor representations (e.g. Tensor, ControlNode) can be - fetched into.

Includes collections of tensors (e.g. tuples).

Minimal complete definition

getFetch

Instances

(~) * a () => Fetchable ControlNode a 
Fetchable t a => Fetchable [t] [a] 
(~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l 
(TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 
(Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) 
(Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) 
(Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) 

class Nodes t

Types that contain ops which can be run.

Minimal complete definition

getNodes

Instances

Nodes ControlNode 
Nodes t => Nodes [t] 
(Nodes t1, Nodes t2) => Nodes (t1, t2) 
(Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) 
Nodes (ListOf f ([] *)) 
Nodes (Tensor v a) 
(Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) 

run :: Fetchable t a => t -> Session a

Run a subgraph t, rendering any dependent nodes that aren't already - rendered, and fetch the corresponding values for a.

run_ :: Nodes t => t -> Session ()

Run a subgraph t, rendering and extending any dependent nodes that aren't - already rendered. This behaves like run except that it doesn't do any - fetches.

data Feed

A pair of a Tensor and some data that should be fed into that Tensor - when running the graph.

feed :: Rendered v => Tensor v a -> TensorData a -> Feed

Create a Feed for feeding the given data into a Tensor when running - the graph.

Note that if a Tensor is rendered, its identity may change; so feeding the - rendered Tensor may be different than feeding the original Tensor.

runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a

Run a subgraph t, rendering any dependent nodes that aren't already - rendered, feed the given input values, and fetch the corresponding result - values for a.

runWithFeeds_ :: Nodes t => [Feed] -> t -> Session ()

Run a subgraph t, rendering any dependent nodes that aren't already - rendered, feed the given input values, and fetch the corresponding result - values for a. This behaves like runWithFeeds except that it doesn't do - any fetches.

Async

asyncProdNodes

Arguments

:: Nodes t 
=> t

Node to evaluate concurrently.

-> Session () 

Starts a concurrent thread which evaluates the given Nodes - forever until runSession exits or an exception occurs. Graph - extension happens synchronously, but the resultant run proceeds as - a separate thread.

Build

type Build = BuildT Identity

An action for building nodes in a TensorFlow graph.

data BuildT m a

An action for building nodes in a TensorFlow graph. - Used to manage build state internally as part of the Session monad.

Instances

MonadTrans BuildT 
TensorKind Build 
Monad m => MonadState GraphState (BuildT m) 
Monad m => Monad (BuildT m) 
Functor m => Functor (BuildT m) 
Monad m => Applicative (BuildT m) 
MonadIO m => MonadIO (BuildT m) 
MonadThrow m => MonadThrow (BuildT m) 
MonadMask m => MonadMask (BuildT m) 
MonadCatch m => MonadCatch (BuildT m) 
Monad m => MonadBuild (BuildT m) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 

render :: MonadBuild m => Tensor Build a -> m (Tensor Value a)

Render a Tensor, fixing its name, scope, device and control inputs from - the MonadBuild context. Also renders any dependencies of the Tensor that - weren't already rendered.

This operation is idempotent; calling render on the same input in the same - context will produce the same result. However, rendering the same - Tensor Build in two different contexts may result in two different - Tensor Values.

asGraphDef :: Build a -> GraphDef

Produce a GraphDef proto representation of the nodes that are rendered in - the given Build action.

opAttr :: Attribute a => Text -> Lens' OpDef a

Tensor

data ControlNode

A type of graph node which has no outputs. These nodes are - valuable for causing side effects when they are run.

data Tensor v a

A named output of a TensorFlow operation.

The type parameter a is the type of the elements in the Tensor. The - parameter v is either:

  • Build: An unrendered, immutable value.
  • Value: A rendered, immutable value.
  • Ref: A rendered stateful handle (e.g., a variable).

Note that expr, value, render and renderValue can help convert between - the different types of Tensor.

Instances

BuildInputs (ListOf (Tensor v) as) 
BuildInputs (Tensor v a) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 
(Rendered v, TensorTypes as) => BuildResult (TensorList v as) 
Rendered v => BuildResult (Tensor v a) 
Nodes (Tensor v a) 
(TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 

value :: Tensor Ref a -> Tensor Value a

Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op.

tensorFromName :: TensorKind v => Text -> Tensor v a

Create a Tensor for a given name. This can be used to reference nodes - in a GraphDef that was loaded via addGraphDef. - TODO(judahjacobson): add more safety checks here.

expr :: TensorKind v => Tensor v a -> Tensor Build a

Element types

data TensorData a

Tensor data with the correct memory layout for tensorflow.

Instances

(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 

class TensorType a => TensorDataType s a where

Types that can be converted to and from TensorData.

Vector is the most efficient to encode/decode for most element types.

Methods

decodeTensorData :: TensorData a -> s a

Decode the bytes of a TensorData into an s.

encodeTensorData :: Shape -> s a -> TensorData a

Encode an s into a TensorData.

The values should be in row major order, e.g.,

element 0: index (0, ..., 0) - element 1: index (0, ..., 1) - ...

newtype Scalar a

Constructors

Scalar 

Fields

unScalar :: a
 

Instances

TensorDataType Vector a => TensorDataType Scalar a 
Eq a => Eq (Scalar a) 
Floating a => Floating (Scalar a) 
Fractional a => Fractional (Scalar a) 
Num a => Num (Scalar a) 
Ord a => Ord (Scalar a) 
Real a => Real (Scalar a) 
RealFloat a => RealFloat (Scalar a) 
RealFrac a => RealFrac (Scalar a) 
Show a => Show (Scalar a) 
IsString a => IsString (Scalar a) 

newtype Shape

Shape (dimensions) of a tensor.

Constructors

Shape [Int64] 

type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a)

A Constraint specifying the possible choices of a TensorType.

We implement a Constraint like OneOf '[Double, Float] a by turning the - natural representation as a conjunction, i.e.,

   a == Double || a == Float
-

into a disjunction like

    a /= Int32 && a /= Int64 && a /= ByteString && ...
-

using an enumeration of all the possible TensorTypes.

type family a /= b :: Constraint

A constraint checking that two types are different.

Equations

a /= a = TypeError a ~ ExcludedCase 
a /= b = () 

Op combinators

colocateWith :: (MonadBuild m, Rendered v) => Tensor v b -> m a -> m a

Places all nodes rendered in the given Build action on the same - device as the given Tensor (see also withDevice). Make sure that - the action has side effects of rendering the desired tensors. A pure - return would not have the desired effect.

newtype Device

A device that a node can be assigned to. - There's a naming convention where the device names - are constructed from job and replica names.

Constructors

Device 

Fields

deviceName :: Text
 

withDevice :: MonadBuild m => Maybe Device -> m a -> m a

Set a device for all nodes rendered in the given Build action - (unless further overridden by another use of withDevice).

withNameScope :: MonadBuild m => Text -> m a -> m a

Prepend a scope to all nodes rendered in the given Build action.

Dependencies

withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a

Modify a Build action, such that all new ops rendered in it will depend - on the nodes in the first argument.

group :: (MonadBuild m, Nodes t) => t -> m ControlNode

Create an op that groups multiple operations.

When this op finishes, all ops in the input n have finished. This op has - no output.

Misc

noOp :: MonadBuild m => m ControlNode

Does nothing. Only useful as a placeholder for control edges.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html deleted file mode 100644 index bc163c6..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html +++ /dev/null @@ -1,8 +0,0 @@ -TensorFlow.Internal.FFI

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Internal.FFI

Synopsis

Documentation

data Session

withSession

Arguments

:: (SessionOptions -> IO ()) 
-> ((IO () -> IO ()) -> Session -> IO a)

The action can spawn concurrent tasks which will - be canceled before withSession returns.

-> IO a 

Runs the given action after creating a session with options - populated by the given optionSetter.

run

Arguments

:: Session 
-> [(ByteString, TensorData)]

Feeds.

-> [ByteString]

Fetches.

-> [ByteString]

Targets.

-> IO [TensorData] 

data TensorData

All of the data needed to represent a tensor.

setSessionConfig :: ConfigProto -> SessionOptions -> IO ()

setSessionTarget :: ByteString -> SessionOptions -> IO ()

getAllOpList :: IO ByteString

Returns the serialized OpList of all OpDefs defined in this - address space.

Internal helper.

useProtoAsVoidPtrLen :: (Message msg, Integral c, Show c, Bits c) => msg -> (Ptr b -> c -> IO a) -> IO a

Serializes the given msg and provides it as (ptr,len) argument - to the given action.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html deleted file mode 100644 index 1f4db93..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html +++ /dev/null @@ -1,4 +0,0 @@ -TensorFlow.Internal.VarInt

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellSafe
LanguageHaskell2010

TensorFlow.Internal.VarInt

Description

Originally taken from internal proto-lens code.

Synopsis

Documentation

getVarInt :: Parser Word64

Decode an unsigned varint.

putVarInt :: Word64 -> Builder

Encode a Word64.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html deleted file mode 100644 index 5b12a0e..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html +++ /dev/null @@ -1,6 +0,0 @@ -TensorFlow.Nodes

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Nodes

Synopsis

Documentation

class Nodes t where

Types that contain ops which can be run.

Methods

getNodes :: t -> Build (Set NodeName)

Instances

Nodes ControlNode 
Nodes t => Nodes [t] 
(Nodes t1, Nodes t2) => Nodes (t1, t2) 
(Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) 
Nodes (ListOf f ([] *)) 
Nodes (Tensor v a) 
(Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) 

class Nodes t => Fetchable t a where

Types that tensor representations (e.g. Tensor, ControlNode) can be - fetched into.

Includes collections of tensors (e.g. tuples).

Methods

getFetch :: t -> Build (Fetch a)

Instances

(~) * a () => Fetchable ControlNode a 
Fetchable t a => Fetchable [t] [a] 
(~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l 
(TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 
(Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) 
(Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) 
(Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) 

data Fetch a

Fetch action. Keeps track of what needs to be fetched and how to decode - the fetched data.

Constructors

Fetch 

Fields

fetches :: Set Text

Nodes to fetch

fetchRestore :: Map Text TensorData -> a

Function to create an a from the fetched data.

nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b

fetchTensorVector :: forall a v. TensorType a => Tensor v a -> Build (Fetch (TensorData a))

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html deleted file mode 100644 index 166ef5c..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html +++ /dev/null @@ -1,13 +0,0 @@ -TensorFlow.Output

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Output

Contents

Documentation

newtype ControlNode

A type of graph node which has no outputs. These nodes are - valuable for causing side effects when they are run.

Constructors

ControlNode 

newtype Device

A device that a node can be assigned to. - There's a naming convention where the device names - are constructed from job and replica names.

Constructors

Device 

Fields

deviceName :: Text
 

Ops

newtype NodeName

The name of a node in the graph. This corresponds to the proto field - NodeDef.name. Includes the scope prefix (if any) and a unique identifier - (if the node was implicitly named).

Constructors

NodeName 

Fields

unNodeName :: Text
 

data OpDef

Op definition. This corresponds somewhat to the NodeDef proto.

Instances

opAttr :: Attribute a => Text -> Lens' OpDef a

opInputs :: Lens' OpDef [Output]

newtype OpType

The type of op of a node in the graph. This corresponds to the proto field - NodeDef.op.

Constructors

OpType 

Fields

unOpType :: Text
 

newtype OutputIx

Constructors

OutputIx 

Fields

unOutputIx :: Int
 

data Output

An output of a TensorFlow node.

Constructors

Output 

data PendingNodeName

The name specified for an unrendered Op. If an Op has an - ImplicitName, it will be assigned based on the opType plus a - unique identifier. Does not contain the "scope" prefix.

Constructors

ExplicitName !Text 
ImplicitName 

newtype ResourceHandle

Opaque handle to a mutable resource in the graph. Typical such - resources are variables.

Constructors

ResourceHandle Output 
\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html deleted file mode 100644 index 5b40269..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html +++ /dev/null @@ -1,19 +0,0 @@ -TensorFlow.Session

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Session

Synopsis

Documentation

data Session a

data Options

Customization for session. Use the lenses to update: - sessionTarget, sessionTracer, sessionConfig.

Instances

Default Options 

sessionConfig :: Lens' Options ConfigProto

Uses the specified config for the created session.

sessionTarget :: Lens' Options ByteString

Target can be: "local", ip:port, host:port. - The set of supported factories depends on the linked in libraries.

sessionTracer :: Lens' Options Tracer

Uses the given logger to monitor session progress.

runSession :: Session a -> IO a

Run Session actions in a new TensorFlow session.

runSessionWithOptions :: Options -> Session a -> IO a

Run Session actions in a new TensorFlow session created with - the given option setter actions (sessionTarget, sessionConfig).

class Monad m => MonadBuild m where

Lift a Build action into a monad, including any explicit op renderings.

Methods

build :: Build a -> m a

extend :: Session ()

Add all pending rendered nodes to the TensorFlow graph and runs - any pending initializers.

Note that run, runWithFeeds, etc. will all call this function implicitly.

run :: Fetchable t a => t -> Session a

Run a subgraph t, rendering any dependent nodes that aren't already - rendered, and fetch the corresponding values for a.

runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a

Run a subgraph t, rendering any dependent nodes that aren't already - rendered, feed the given input values, and fetch the corresponding result - values for a.

run_ :: Nodes t => t -> Session ()

Run a subgraph t, rendering and extending any dependent nodes that aren't - already rendered. This behaves like run except that it doesn't do any - fetches.

runWithFeeds_ :: Nodes t => [Feed] -> t -> Session ()

Run a subgraph t, rendering any dependent nodes that aren't already - rendered, feed the given input values, and fetch the corresponding result - values for a. This behaves like runWithFeeds except that it doesn't do - any fetches.

asyncProdNodes

Arguments

:: Nodes t 
=> t

Node to evaluate concurrently.

-> Session () 

Starts a concurrent thread which evaluates the given Nodes - forever until runSession exits or an exception occurs. Graph - extension happens synchronously, but the resultant run proceeds as - a separate thread.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html deleted file mode 100644 index 5199bf9..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html +++ /dev/null @@ -1,25 +0,0 @@ -TensorFlow.Tensor

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Tensor

Synopsis

Documentation

data Tensor v a where

A named output of a TensorFlow operation.

The type parameter a is the type of the elements in the Tensor. The - parameter v is either:

  • Build: An unrendered, immutable value.
  • Value: A rendered, immutable value.
  • Ref: A rendered stateful handle (e.g., a variable).

Note that expr, value, render and renderValue can help convert between - the different types of Tensor.

Constructors

Tensor :: TensorKind v => v Output -> Tensor v a 

Fields

tensorOutput :: v Output
 

Instances

BuildInputs (ListOf (Tensor v) as) 
BuildInputs (Tensor v a) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 
(Rendered v, TensorTypes as) => BuildResult (TensorList v as) 
Rendered v => BuildResult (Tensor v a) 
Nodes (Tensor v a) 
(TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 

newtype Value a

Constructors

Value 

Fields

runValue :: a
 

newtype Ref a

Constructors

Ref 

Fields

runRef :: a
 

value :: Tensor Ref a -> Tensor Value a

Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op.

data Feed

A pair of a Tensor and some data that should be fed into that Tensor - when running the graph.

Constructors

Feed Output TensorData 

class TensorKind v => Rendered v where

A class ensuring that a given tensor is rendered, i.e., has a fixed - name, device, etc.

Methods

rendered :: v a -> a

feed :: Rendered v => Tensor v a -> TensorData a -> Feed

Create a Feed for feeding the given data into a Tensor when running - the graph.

Note that if a Tensor is rendered, its identity may change; so feeding the - rendered Tensor may be different than feeding the original Tensor.

tensorFromName :: TensorKind v => Text -> Tensor v a

Create a Tensor for a given name. This can be used to reference nodes - in a GraphDef that was loaded via addGraphDef. - TODO(judahjacobson): add more safety checks here.

tensorValueFromName :: Text -> Tensor Value a

Like tensorFromName, but type-restricted to Value.

tensorRefFromName :: Text -> Tensor Ref a

Like tensorFromName, but type-restricted to Ref.

type TensorList v = ListOf (Tensor v)

colocateWith :: (MonadBuild m, Rendered v) => Tensor v b -> m a -> m a

Places all nodes rendered in the given Build action on the same - device as the given Tensor (see also withDevice). Make sure that - the action has side effects of rendering the desired tensors. A pure - return would not have the desired effect.

render :: MonadBuild m => Tensor Build a -> m (Tensor Value a)

Render a Tensor, fixing its name, scope, device and control inputs from - the MonadBuild context. Also renders any dependencies of the Tensor that - weren't already rendered.

This operation is idempotent; calling render on the same input in the same - context will produce the same result. However, rendering the same - Tensor Build in two different contexts may result in two different - Tensor Values.

expr :: TensorKind v => Tensor v a -> Tensor Build a

addSummary

Arguments

:: (MonadBuild m, TensorKind v) 
=> Tensor v ByteString

A SummaryTensor

-> m () 

Records the given summary action in Build for retrieval with - Summary protocol buffer in string form. For safety, use the - pre-composed functions: Logging.scalarSummary and - Logging.histogramSummary.

collectAllSummaries :: MonadBuild m => m [SummaryTensor]

Retrieves the summary ops collected thus far. Typically this only - happens once, but if buildWithSummary is used - repeatedly, the values accumulate.

type SummaryTensor = Tensor Value ByteString

Synonym for the tensors that return serialized Summary proto.

class Monad v => TensorKind v where

An internal class for kinds of Tensors.

Methods

toBuild :: v a -> Build a

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html deleted file mode 100644 index 779507e..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html +++ /dev/null @@ -1,12 +0,0 @@ -TensorFlow.Types

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Types

Synopsis

Documentation

newtype TensorData a

Tensor data with the correct memory layout for tensorflow.

Constructors

TensorData 

Instances

(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 

class TensorType a => TensorDataType s a where

Types that can be converted to and from TensorData.

Vector is the most efficient to encode/decode for most element types.

Methods

decodeTensorData :: TensorData a -> s a

Decode the bytes of a TensorData into an s.

encodeTensorData :: Shape -> s a -> TensorData a

Encode an s into a TensorData.

The values should be in row major order, e.g.,

element 0: index (0, ..., 0) - element 1: index (0, ..., 1) - ...

newtype Scalar a

Constructors

Scalar 

Fields

unScalar :: a
 

Instances

TensorDataType Vector a => TensorDataType Scalar a 
Eq a => Eq (Scalar a) 
Floating a => Floating (Scalar a) 
Fractional a => Fractional (Scalar a) 
Num a => Num (Scalar a) 
Ord a => Ord (Scalar a) 
Real a => Real (Scalar a) 
RealFloat a => RealFloat (Scalar a) 
RealFrac a => RealFrac (Scalar a) 
Show a => Show (Scalar a) 
IsString a => IsString (Scalar a) 

newtype Shape

Shape (dimensions) of a tensor.

Constructors

Shape [Int64] 

Lists

data ListOf f as where

A heterogeneous list type.

Constructors

Nil :: ListOf f `[]` 
(:/) :: f a -> ListOf f as -> ListOf f (a : as) infixr 5 

Instances

All Eq (Map f as) => Eq (ListOf f as) 
All Show (Map f as) => Show (ListOf f as) 
BuildInputs (ListOf (Tensor v) as) 
TensorTypes as => PureResult (TensorList Build as) 
(Rendered v, TensorTypes as) => BuildResult (TensorList v as) 
(Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) 
Nodes (ListOf f ([] *)) 
(~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l 
(Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) 

(/:/) :: a -> List as -> List (a : as) infixr 5

Equivalent of :/ for lists.

data TensorTypeProxy a where

class TensorTypes ts where

Instances

TensorTypes ([] *) 
(TensorType t, TensorTypes ts) => TensorTypes ((:) * t ts)

A constraint that the input is a list of TensorTypes.

fromTensorTypes :: forall as. TensorTypes as => Proxy as -> [DataType]

Type constraints

type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a)

A Constraint specifying the possible choices of a TensorType.

We implement a Constraint like OneOf '[Double, Float] a by turning the - natural representation as a conjunction, i.e.,

   a == Double || a == Float
-

into a disjunction like

    a /= Int32 && a /= Int64 && a /= ByteString && ...
-

using an enumeration of all the possible TensorTypes.

type family a /= b :: Constraint

A constraint checking that two types are different.

Equations

a /= a = TypeError a ~ ExcludedCase 
a /= b = () 

type OneOfs ts as = (TensorTypes as, TensorTypes ts, NoneOfs (AllTensorTypes \\ ts) as)

Implementation of constraints

data TypeError a

Helper types to produce a reasonable type error message when the Constraint - "a /= a" fails. - TODO(judahjacobson): Use ghc-8's CustomTypeErrors for this.

type family NoneOf ts a :: Constraint

A constraint that the type a doesn't appear in the type list ts. - Assumes that a and each of the elements of ts are TensorTypes.

Equations

NoneOf (t1 : (t2 : (t3 : (t4 : ts)))) a = (a /= t1, a /= t2, a /= t3, a /= t4, NoneOf ts a) 
NoneOf (t1 : (t2 : (t3 : ts))) a = (a /= t1, a /= t2, a /= t3, NoneOf ts a) 
NoneOf (t1 : (t2 : ts)) a = (a /= t1, a /= t2, NoneOf ts a) 
NoneOf (t1 : ts) a = (a /= t1, NoneOf ts a) 
NoneOf `[]` a = () 

type family as \\ bs

Takes the difference of two lists of types.

Equations

as \\ `[]` = as 
as \\ (b : bs) = Delete b as \\ bs 

type family Delete a as

Removes a type from the given list of types.

Equations

Delete a `[]` = `[]` 
Delete a (a : as) = Delete a as 
Delete a (b : as) = b : Delete a as 

type AllTensorTypes = `[Float, Double, Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool]`

An enumeration of all valid TensorTypes.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-124.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-124.html deleted file mode 100644 index 41b9540..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-124.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - |)

tensorflow-0.1.0.0: TensorFlow bindings.

Index - |

|:|TensorFlow.Types
\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-All.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-All.html deleted file mode 100644 index c57d00f..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-All.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index)

tensorflow-0.1.0.0: TensorFlow bindings.

Index

/:/TensorFlow.Types
/=TensorFlow.Types, TensorFlow.Core
:/TensorFlow.Types
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addNewOpTensorFlow.Build
addSummaryTensorFlow.Tensor
AllTensorTypesTensorFlow.Types
asGraphDefTensorFlow.Build, TensorFlow.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
AttributeTensorFlow.Types
attrLensTensorFlow.Types
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
BuildInputsTensorFlow.BuildOp
buildInputsTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildResultTensorFlow.BuildOp
buildResultTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
collectAllSummariesTensorFlow.Tensor
colocateWithTensorFlow.Tensor, TensorFlow.Core
ControlNode 
1 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
2 (Data Constructor)TensorFlow.Output, TensorFlow.Build
DataTypeTensorFlow.Types
decodeTensorDataTensorFlow.Types, TensorFlow.Core
DeleteTensorFlow.Types
Device 
1 (Type/Class)TensorFlow.Output, TensorFlow.Core
2 (Data Constructor)TensorFlow.Output, TensorFlow.Core
deviceNameTensorFlow.Output, TensorFlow.Core
DT_BFLOAT16TensorFlow.Types
DT_BFLOAT16_REFTensorFlow.Types
DT_BOOLTensorFlow.Types
DT_BOOL_REFTensorFlow.Types
DT_COMPLEX128TensorFlow.Types
DT_COMPLEX128_REFTensorFlow.Types
DT_COMPLEX64TensorFlow.Types
DT_COMPLEX64_REFTensorFlow.Types
DT_DOUBLETensorFlow.Types
DT_DOUBLE_REFTensorFlow.Types
DT_FLOATTensorFlow.Types
DT_FLOAT_REFTensorFlow.Types
DT_HALFTensorFlow.Types
DT_HALF_REFTensorFlow.Types
DT_INT16TensorFlow.Types
DT_INT16_REFTensorFlow.Types
DT_INT32TensorFlow.Types
DT_INT32_REFTensorFlow.Types
DT_INT64TensorFlow.Types
DT_INT64_REFTensorFlow.Types
DT_INT8TensorFlow.Types
DT_INT8_REFTensorFlow.Types
DT_INVALIDTensorFlow.Types
DT_QINT16TensorFlow.Types
DT_QINT16_REFTensorFlow.Types
DT_QINT32TensorFlow.Types
DT_QINT32_REFTensorFlow.Types
DT_QINT8TensorFlow.Types
DT_QINT8_REFTensorFlow.Types
DT_QUINT16TensorFlow.Types
DT_QUINT16_REFTensorFlow.Types
DT_QUINT8TensorFlow.Types
DT_QUINT8_REFTensorFlow.Types
DT_RESOURCETensorFlow.Types
DT_RESOURCE_REFTensorFlow.Types
DT_STRINGTensorFlow.Types
DT_STRING_REFTensorFlow.Types
DT_UINT16TensorFlow.Types
DT_UINT16_REFTensorFlow.Types
DT_UINT8TensorFlow.Types
DT_UINT8_REFTensorFlow.Types
encodeOutputTensorFlow.Build
encodeTensorDataTensorFlow.Types, TensorFlow.Core
eqLengthGuardTensorFlow.BuildOp
evalBuildTTensorFlow.Build
ExcludedCaseTensorFlow.Types
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
exprTensorFlow.Tensor, TensorFlow.Core
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
Feed 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Type/Class)TensorFlow.Nodes
2 (Data Constructor)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getVarIntTensorFlow.Internal.VarInt
GraphStateTensorFlow.Build
groupTensorFlow.ControlFlow, TensorFlow.Core
hoistBuildTTensorFlow.Build
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
ListTensorFlow.Types
ListOfTensorFlow.Types
lookupNodeTensorFlow.Build
MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
NilTensorFlow.Types
NodeName 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
noOpTensorFlow.ControlFlow, TensorFlow.Core
OneOfTensorFlow.Types, TensorFlow.Core
OneOfsTensorFlow.Types
opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
opDefTensorFlow.Build
opDefWithNameTensorFlow.Build
opInputsTensorFlow.Output, TensorFlow.Build
opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
OpParamsTensorFlow.BuildOp
OptionsTensorFlow.Session, TensorFlow.Core
OpType 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
Output 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
outputTensorFlow.Output
outputIndexTensorFlow.Output
OutputIx 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
outputNodeNameTensorFlow.Output
PendingNodeNameTensorFlow.Output
protoShapeTensorFlow.Types
pureOpTensorFlow.BuildOp
PureResultTensorFlow.BuildOp
pureResultTensorFlow.BuildOp
putVarIntTensorFlow.Internal.VarInt
Ref 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
renderTensorFlow.Tensor, TensorFlow.Core
RenderedTensorFlow.Tensor
renderedTensorFlow.Tensor
renderedNodeDefsTensorFlow.Build
renderedOutputTensorFlow.Tensor
renderValueTensorFlow.Tensor
ResourceHandle 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
run 
1 (Function)TensorFlow.Internal.FFI
2 (Function)TensorFlow.Session, TensorFlow.Core
runBuildTTensorFlow.Build
runRefTensorFlow.Tensor
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runValueTensorFlow.Tensor
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
Scalar 
1 (Type/Class)TensorFlow.Types, TensorFlow.Core
2 (Data Constructor)TensorFlow.Types, TensorFlow.Core
Session 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Session, TensorFlow.Core
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
Shape 
1 (Type/Class)TensorFlow.Types, TensorFlow.Core
2 (Data Constructor)TensorFlow.Types, TensorFlow.Core
summariesTensorFlow.Build
SummaryTensorTensorFlow.Tensor
Tensor 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
TensorData 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Data Constructor)TensorFlow.Internal.FFI
3 (Type/Class)TensorFlow.Types, TensorFlow.Core
4 (Data Constructor)TensorFlow.Types
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Data Constructor)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Type/Class)TensorFlow.Types
2 (Data Constructor)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
toBuildTensorFlow.Tensor
TypeErrorTensorFlow.Types
unControlNodeTensorFlow.Output, TensorFlow.Build
UniqueTensorFlow.Build
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unScalarTensorFlow.Types, TensorFlow.Core
unTensorDataTensorFlow.Types
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
Value 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
valueTensorFlow.Tensor, TensorFlow.Core
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
\\TensorFlow.Types
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_opInputsTensorFlow.Output
_opNameTensorFlow.Output
_opTypeTensorFlow.Output
\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-R.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-R.html deleted file mode 100644 index 991398b..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-R.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - R)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/frames.html b/docs/haddock/tensorflow-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-0.1.0.0/haddock-util.js deleted file mode 100644 index 9a6fccf..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/haddock-util.js +++ /dev/null @@ -1,344 +0,0 @@ -// Haddock JavaScript utilities - -var rspace = /\s\s+/g, - rtrim = /^\s+|\s+$/g; - -function spaced(s) { return (" " + s + " ").replace(rspace, " "); } -function trim(s) { return s.replace(rtrim, ""); } - -function hasClass(elem, value) { - var className = spaced(elem.className || ""); - return className.indexOf( " " + value + " " ) >= 0; -} - -function addClass(elem, value) { - var className = spaced(elem.className || ""); - if ( className.indexOf( " " + value + " " ) < 0 ) { - elem.className = trim(className + " " + value); - } -} - -function removeClass(elem, value) { - var className = spaced(elem.className || ""); - className = className.replace(" " + value + " ", " "); - elem.className = trim(className); -} - -function toggleClass(elem, valueOn, valueOff, bool) { - if (bool == null) { bool = ! hasClass(elem, valueOn); } - if (bool) { - removeClass(elem, valueOff); - addClass(elem, valueOn); - } - else { - removeClass(elem, valueOn); - addClass(elem, valueOff); - } - return bool; -} - - -function makeClassToggle(valueOn, valueOff) -{ - return function(elem, bool) { - return toggleClass(elem, valueOn, valueOff, bool); - } -} - -toggleShow = makeClassToggle("show", "hide"); -toggleCollapser = makeClassToggle("collapser", "expander"); - -function toggleSection(id) -{ - var b = toggleShow(document.getElementById("section." + id)); - toggleCollapser(document.getElementById("control." 
+ id), b); - rememberCollapsed(id, b); - return b; -} - -var collapsed = {}; -function rememberCollapsed(id, b) -{ - if(b) - delete collapsed[id] - else - collapsed[id] = null; - - var sections = []; - for(var i in collapsed) - { - if(collapsed.hasOwnProperty(i)) - sections.push(i); - } - // cookie specific to this page; don't use setCookie which sets path=/ - document.cookie = "collapsed=" + escape(sections.join('+')); -} - -function restoreCollapsed() -{ - var cookie = getCookie("collapsed"); - if(!cookie) - return; - - var ids = cookie.split('+'); - for(var i in ids) - { - if(document.getElementById("section." + ids[i])) - toggleSection(ids[i]); - } -} - -function setCookie(name, value) { - document.cookie = name + "=" + escape(value) + ";path=/;"; -} - -function clearCookie(name) { - document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; -} - -function getCookie(name) { - var nameEQ = name + "="; - var ca = document.cookie.split(';'); - for(var i=0;i < ca.length;i++) { - var c = ca[i]; - while (c.charAt(0)==' ') c = c.substring(1,c.length); - if (c.indexOf(nameEQ) == 0) { - return unescape(c.substring(nameEQ.length,c.length)); - } - } - return null; -} - - - -var max_results = 75; // 50 is not enough to search for map in the base libraries -var shown_range = null; -var last_search = null; - -function quick_search() -{ - perform_search(false); -} - -function full_search() -{ - perform_search(true); -} - - -function perform_search(full) -{ - var text = document.getElementById("searchbox").value.toLowerCase(); - if (text == last_search && !full) return; - last_search = text; - - var table = document.getElementById("indexlist"); - var status = document.getElementById("searchmsg"); - var children = table.firstChild.childNodes; - - // first figure out the first node with the prefix - var first = bisect(-1); - var last = (first == -1 ? 
-1 : bisect(1)); - - if (first == -1) - { - table.className = ""; - status.innerHTML = "No results found, displaying all"; - } - else if (first == 0 && last == children.length - 1) - { - table.className = ""; - status.innerHTML = ""; - } - else if (last - first >= max_results && !full) - { - table.className = ""; - status.innerHTML = "More than " + max_results + ", press Search to display"; - } - else - { - // decide what you need to clear/show - if (shown_range) - setclass(shown_range[0], shown_range[1], "indexrow"); - setclass(first, last, "indexshow"); - shown_range = [first, last]; - table.className = "indexsearch"; - status.innerHTML = ""; - } - - - function setclass(first, last, status) - { - for (var i = first; i <= last; i++) - { - children[i].className = status; - } - } - - - // do a binary search, treating 0 as ... - // return either -1 (no 0's found) or location of most far match - function bisect(dir) - { - var first = 0, finish = children.length - 1; - var mid, success = false; - - while (finish - first > 3) - { - mid = Math.floor((finish + first) / 2); - - var i = checkitem(mid); - if (i == 0) i = dir; - if (i == -1) - finish = mid; - else - first = mid; - } - var a = (dir == 1 ? first : finish); - var b = (dir == 1 ? finish : first); - for (var i = b; i != a - dir; i -= dir) - { - if (checkitem(i) == 0) return i; - } - return -1; - } - - - // from an index, decide what the result is - // 0 = match, -1 is lower, 1 is higher - function checkitem(i) - { - var s = getitem(i).toLowerCase().substr(0, text.length); - if (s == text) return 0; - else return (s > text ? 
-1 : 1); - } - - - // from an index, get its string - // this abstracts over alternates - function getitem(i) - { - for ( ; i >= 0; i--) - { - var s = children[i].firstChild.firstChild.data; - if (s.indexOf(' ') == -1) - return s; - } - return ""; // should never be reached - } -} - -function setSynopsis(filename) { - if (parent.window.synopsis) { - if (parent.window.synopsis.location.replace) { - // In Firefox this avoids adding the change to the history. - parent.window.synopsis.location.replace(filename); - } else { - parent.window.synopsis.location = filename; - } - } -} - -function addMenuItem(html) { - var menu = document.getElementById("page-menu"); - if (menu) { - var btn = menu.firstChild.cloneNode(false); - btn.innerHTML = html; - menu.appendChild(btn); - } -} - -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - -function styles() { - var i, a, es = document.getElementsByTagName("link"), rs = []; - for (i = 0; a = es[i]; i++) { - if(a.rel.indexOf("style") != -1 && a.title) { - rs.push(a); - } - } - return rs; -} - -function addStyleMenu() { - var as = styles(); - var i, a, btns = ""; - for(i=0; a = as[i]; i++) { - btns += "
  • " - + a.title + "
  • " - } - if (as.length > 1) { - var h = "
    " - + "Style ▾" - + "" - + "
    "; - addMenuItem(h); - } -} - -function setActiveStyleSheet(title) { - var as = styles(); - var i, a, found; - for(i=0; a = as[i]; i++) { - a.disabled = true; - // need to do this always, some browsers are edge triggered - if(a.title == title) { - found = a; - } - } - if (found) { - found.disabled = false; - setCookie("haddock-style", title); - } - else { - as[0].disabled = false; - clearCookie("haddock-style"); - } - styleMenu(false); -} - -function resetStyle() { - var s = getCookie("haddock-style"); - if (s) setActiveStyleSheet(s); -} - - -function styleMenu(show) { - var m = document.getElementById('style-menu'); - if (m) toggleShow(m, show); -} - - -function pageLoad() { - addStyleMenu(); - adjustForFrames(); - resetStyle(); - restoreCollapsed(); -} - diff --git a/docs/haddock/tensorflow-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-0.1.0.0/index-frames.html deleted file mode 100644 index 6d0b43a..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-0.1.0.0: TensorFlow bindings.

    Modules

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/index.html b/docs/haddock/tensorflow-0.1.0.0/index.html deleted file mode 100644 index 1c9a41c..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/index.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-0.1.0.0: TensorFlow bindings.

    tensorflow-0.1.0.0: TensorFlow bindings.

    tensorflow-0.1.0.0: TensorFlow bindings.

    Please see README.md

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Core.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Core.html deleted file mode 100644 index 04eef8c..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Core.html +++ /dev/null @@ -1,4 +0,0 @@ -TensorFlow.Core

    TensorFlow.Core

    Session

    data Session a

    data Options

    Building graphs

    class MonadBuild m

    Running graphs

    class Fetchable t a

    class Nodes t

    data Feed

    Async

    Build

    type Build

    data BuildT m a

    Tensor

    data Tensor v a

    data Value a

    data Ref a

    Element types

    class TensorType a

    data TensorData a

    class TensorDataType s a

    data Scalar a

    data Shape

    type OneOf ts a

    type family a /= b :: Constraint

    Op combinators

    data Device

    Dependencies

    Misc

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html deleted file mode 100644 index b148171..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html +++ /dev/null @@ -1,4 +0,0 @@ -TensorFlow.Session

    TensorFlow.Session

    data Session a

    data Options

    class MonadBuild m

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html deleted file mode 100644 index 8442ee8..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html +++ /dev/null @@ -1,4 +0,0 @@ -TensorFlow.Tensor

    TensorFlow.Tensor

    data Tensor v a

    data Value a

    data Ref a

    data Feed

    class Rendered v

    type TensorList v

    class TensorKind v

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html deleted file mode 100644 index bb67294..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html +++ /dev/null @@ -1,4 +0,0 @@ -TensorFlow.Types

    TensorFlow.Types

    class TensorType a

    data TensorData a

    class TensorDataType s a

    data Scalar a

    data Shape

    class Attribute a

    Lists

    data ListOf f as

    type List

    class TensorTypes ts

    Type constraints

    type OneOf ts a

    type family a /= b :: Constraint

    type OneOfs ts as

    Implementation of constraints

    data TypeError a

    type family NoneOf ts a :: Constraint

    type family as \\ bs

    type family Delete a as

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/ocean.css b/docs/haddock/tensorflow-0.1.0.0/ocean.css deleted file mode 100644 index 1110b40..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/ocean.css +++ /dev/null @@ -1,600 +0,0 @@ -/* @group Fundamentals */ - -* { margin: 0; padding: 0 } - -/* Is this portable? */ -html { - background-color: white; - width: 100%; - height: 100%; -} - -body { - background: white; - color: black; - text-align: left; - min-height: 100%; - position: relative; -} - -p { - margin: 0.8em 0; -} - -ul, ol { - margin: 0.8em 0 0.8em 2em; -} - -dl { - margin: 0.8em 0; -} - -dt { - font-weight: bold; -} -dd { - margin-left: 2em; -} - -a { text-decoration: none; } -a[href]:link { color: rgb(196,69,29); } -a[href]:visited { color: rgb(171,105,84); } -a[href]:hover { text-decoration:underline; } - -/* @end */ - -/* @group Fonts & Sizes */ - -/* Basic technique & IE workarounds from YUI 3 - For reasons, see: - http://yui.yahooapis.com/3.1.1/build/cssfonts/fonts.css - */ - -body { - font:13px/1.4 sans-serif; - *font-size:small; /* for IE */ - *font:x-small; /* for IE in quirks mode */ -} - -h1 { font-size: 146.5%; /* 19pt */ } -h2 { font-size: 131%; /* 17pt */ } -h3 { font-size: 116%; /* 15pt */ } -h4 { font-size: 100%; /* 13pt */ } -h5 { font-size: 100%; /* 13pt */ } - -select, input, button, textarea { - font:99% sans-serif; -} - -table { - font-size:inherit; - font:100%; -} - -pre, code, kbd, samp, tt, .src { - font-family:monospace; - *font-size:108%; - line-height: 124%; -} - -.links, .link { - font-size: 85%; /* 11pt */ -} - -#module-header .caption { - font-size: 182%; /* 24pt */ -} - -.info { - font-size: 85%; /* 11pt */ -} - -#table-of-contents, #synopsis { - /* font-size: 85%; /* 11pt */ -} - - -/* @end */ - -/* @group Common */ - -.caption, h1, h2, h3, h4, h5, h6 { - font-weight: bold; - color: rgb(78,98,114); - margin: 0.8em 0 0.4em; -} - -* + h1, * + h2, * + h3, * + h4, * + h5, * + h6 { - margin-top: 2em; -} 
- -h1 + h2, h2 + h3, h3 + h4, h4 + h5, h5 + h6 { - margin-top: inherit; -} - -ul.links { - list-style: none; - text-align: left; - float: right; - display: inline-table; - margin: 0 0 0 1em; -} - -ul.links li { - display: inline; - border-left: 1px solid #d5d5d5; - white-space: nowrap; - padding: 0; -} - -ul.links li a { - padding: 0.2em 0.5em; -} - -.hide { display: none; } -.show { display: inherit; } -.clear { clear: both; } - -.collapser { - background-image: url(minus.gif); - background-repeat: no-repeat; -} -.expander { - background-image: url(plus.gif); - background-repeat: no-repeat; -} -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} -.collapser, .expander { - padding-left: 14px; - margin-left: -14px; - cursor: pointer; -} - -pre { - padding: 0.25em; - margin: 0.8em 0; - background: rgb(229,237,244); - overflow: auto; - border-bottom: 0.25em solid white; - /* white border adds some space below the box to compensate - for visual extra space that paragraphs have between baseline - and the bounding box */ -} - -.src { - background: #f0f0f0; - padding: 0.2em 0.5em; -} - -.keyword { font-weight: normal; } -.def { font-weight: bold; } - - -/* @end */ - -/* @group Page Structure */ - -#content { - margin: 0 auto; - padding: 0 2em 6em; -} - -#package-header { - background: rgb(41,56,69); - border-top: 5px solid rgb(78,98,114); - color: #ddd; - padding: 0.2em; - position: relative; - text-align: left; -} - -#package-header .caption { - background: url(hslogo-16.png) no-repeat 0em; - color: white; - margin: 0 2em; - font-weight: normal; - font-style: normal; - padding-left: 2em; -} - -#package-header a:link, #package-header a:visited { color: white; } -#package-header a:hover { background: rgb(78,98,114); } - -#module-header .caption { - color: rgb(78,98,114); - font-weight: bold; - border-bottom: 1px solid #ddd; -} - -table.info { - float: right; - padding: 0.5em 1em; - border: 1px solid #ddd; - color: rgb(78,98,114); - 
background-color: #fff; - max-width: 40%; - border-spacing: 0; - position: relative; - top: -0.5em; - margin: 0 0 0 2em; -} - -.info th { - padding: 0 1em 0 0; -} - -div#style-menu-holder { - position: relative; - z-index: 2; - display: inline; -} - -#style-menu { - position: absolute; - z-index: 1; - overflow: visible; - background: #374c5e; - margin: 0; - text-align: center; - right: 0; - padding: 0; - top: 1.25em; -} - -#style-menu li { - display: list-item; - border-style: none; - margin: 0; - padding: 0; - color: #000; - list-style-type: none; -} - -#style-menu li + li { - border-top: 1px solid #919191; -} - -#style-menu a { - width: 6em; - padding: 3px; - display: block; -} - -#footer { - background: #ddd; - border-top: 1px solid #aaa; - padding: 0.5em 0; - color: #666; - text-align: center; - position: absolute; - bottom: 0; - width: 100%; - height: 3em; -} - -/* @end */ - -/* @group Front Matter */ - -#table-of-contents { - float: right; - clear: right; - background: #faf9dc; - border: 1px solid #d8d7ad; - padding: 0.5em 1em; - max-width: 20em; - margin: 0.5em 0 1em 1em; -} - -#table-of-contents .caption { - text-align: center; - margin: 0; -} - -#table-of-contents ul { - list-style: none; - margin: 0; -} - -#table-of-contents ul ul { - margin-left: 2em; -} - -#description .caption { - display: none; -} - -#synopsis { - display: none; -} - -.no-frame #synopsis { - display: block; - position: fixed; - right: 0; - height: 80%; - top: 10%; - padding: 0; - max-width: 75%; -} - -#synopsis .caption { - float: left; - width: 29px; - color: rgba(255,255,255,0); - height: 110px; - margin: 0; - font-size: 1px; - padding: 0; -} - -#synopsis p.caption.collapser { - background: url(synopsis.png) no-repeat -64px -8px; -} - -#synopsis p.caption.expander { - background: url(synopsis.png) no-repeat 0px -8px; -} - -#synopsis ul { - height: 100%; - overflow: auto; - padding: 0.5em; - margin: 0; -} - -#synopsis ul ul { - overflow: hidden; -} - -#synopsis ul, -#synopsis ul 
li.src { - background-color: #faf9dc; - white-space: nowrap; - list-style: none; - margin-left: 0; -} - -/* @end */ - -/* @group Main Content */ - -#interface div.top { margin: 2em 0; } -#interface h1 + div.top, -#interface h2 + div.top, -#interface h3 + div.top, -#interface h4 + div.top, -#interface h5 + div.top { - margin-top: 1em; -} -#interface p.src .link { - float: right; - color: #919191; - border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; -} - -#interface td.src .link { - float: right; - color: #919191; - border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; -} - -#interface span.fixity { - color: #919191; - border-left: 1px solid #919191; - padding: 0.2em 0.5em 0.2em 0.5em; - margin: 0 -1em 0 1em; -} - -#interface span.rightedge { - border-left: 1px solid #919191; - padding: 0.2em 0 0.2em 0; - margin: 0 0 0 1em; -} - -#interface table { border-spacing: 2px; } -#interface td { - vertical-align: top; - padding-left: 0.5em; -} -#interface td.src { - white-space: nowrap; -} -#interface td.doc p { - margin: 0; -} -#interface td.doc p + p { - margin-top: 0.8em; -} - -.clearfix:after { - clear: both; - content: " "; - display: block; - height: 0; - visibility: hidden; -} - -.subs dl { - margin: 0; -} - -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; -} - -.subs dd { - float: right; - width: 90%; - display: block; - padding-left: 0.5em; - margin-bottom: 0.5em; -} - -.subs dd.empty { - display: none; -} - -.subs dd p { - margin: 0; -} - -/* Render short-style data instances */ -.inst ul { - height: 100%; - padding: 0.5em; - margin: 0; -} - -.inst, .inst li { - list-style: none; - margin-left: 1em; -} - -/* Workaround for bug in Firefox (issue #384) */ -.inst-left { - float: left; -} - -.top p.src { - border-top: 1px solid #ccc; -} - -.subs, .doc { - /* use this selector for one level of indent */ - padding-left: 2em; -} - 
-.warning { - color: red; -} - -.arguments { - margin-top: -0.4em; -} -.arguments .caption { - display: none; -} - -.fields { padding-left: 1em; } - -.fields .caption { display: none; } - -.fields p { margin: 0 0; } - -/* this seems bulky to me -.methods, .constructors { - background: #f8f8f8; - border: 1px solid #eee; -} -*/ - -/* @end */ - -/* @group Auxillary Pages */ - - -.extension-list { - list-style-type: none; - margin-left: 0; -} - -#mini { - margin: 0 auto; - padding: 0 1em 1em; -} - -#mini > * { - font-size: 93%; /* 12pt */ -} - -#mini #module-list .caption, -#mini #module-header .caption { - font-size: 125%; /* 15pt */ -} - -#mini #interface h1, -#mini #interface h2, -#mini #interface h3, -#mini #interface h4 { - font-size: 109%; /* 13pt */ - margin: 1em 0 0; -} - -#mini #interface .top, -#mini #interface .src { - margin: 0; -} - -#mini #module-list ul { - list-style: none; - margin: 0; -} - -#alphabet ul { - list-style: none; - padding: 0; - margin: 0.5em 0 0; - text-align: center; -} - -#alphabet li { - display: inline; - margin: 0 0.25em; -} - -#alphabet a { - font-weight: bold; -} - -#index .caption, -#module-list .caption { font-size: 131%; /* 17pt */ } - -#index table { - margin-left: 2em; -} - -#index .src { - font-weight: bold; -} -#index .alt { - font-size: 77%; /* 10pt */ - font-style: italic; - padding-left: 2em; -} - -#index td + td { - padding-left: 1em; -} - -#module-list ul { - list-style: none; - margin: 0 0 0 2em; -} - -#module-list li { - clear: right; -} - -#module-list span.collapser, -#module-list span.expander { - background-position: 0 0.3em; -} - -#module-list .package { - float: right; -} - -/* @end */ diff --git a/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt b/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt deleted file mode 100644 index 5986428..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt +++ /dev/null @@ -1,942 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, 
http://www.haskell.org/hoogle/ - - --- | TensorFlow bindings. --- --- Please see README.md -@package tensorflow -@version 0.1.0.0 - - --- | Originally taken from internal proto-lens code. -module TensorFlow.Internal.VarInt - --- | Decode an unsigned varint. -getVarInt :: Parser Word64 - --- | Encode a Word64. -putVarInt :: Word64 -> Builder - -module TensorFlow.Internal.FFI -data TensorFlowException -TensorFlowException :: Code -> Text -> TensorFlowException -data Session - --- | Runs the given action after creating a session with options populated --- by the given optionSetter. -withSession :: (SessionOptions -> IO ()) -> ((IO () -> IO ()) -> Session -> IO a) -> IO a -extendGraph :: Session -> GraphDef -> IO () -run :: Session -> [(ByteString, TensorData)] -> [ByteString] -> [ByteString] -> IO [TensorData] - --- | All of the data needed to represent a tensor. -data TensorData -TensorData :: [Int64] -> !DataType -> !(Vector Word8) -> TensorData -[tensorDataDimensions] :: TensorData -> [Int64] -[tensorDataType] :: TensorData -> !DataType -[tensorDataBytes] :: TensorData -> !(Vector Word8) -setSessionConfig :: ConfigProto -> SessionOptions -> IO () -setSessionTarget :: ByteString -> SessionOptions -> IO () - --- | Returns the serialized OpList of all OpDefs defined in this address --- space. -getAllOpList :: IO ByteString - --- | Serializes the given msg and provides it as (ptr,len) argument to the --- given action. -useProtoAsVoidPtrLen :: (Message msg, Integral c, Show c, Bits c) => msg -> (Ptr b -> c -> IO a) -> IO a -instance GHC.Classes.Eq TensorFlow.Internal.FFI.TensorData -instance GHC.Show.Show TensorFlow.Internal.FFI.TensorData -instance GHC.Classes.Eq TensorFlow.Internal.FFI.TensorFlowException -instance GHC.Show.Show TensorFlow.Internal.FFI.TensorFlowException -instance GHC.Exception.Exception TensorFlow.Internal.FFI.TensorFlowException - -module TensorFlow.Types - --- | The class of scalar types supported by tensorflow. 
-class TensorType a -tensorType :: TensorType a => a -> DataType -tensorRefType :: TensorType a => a -> DataType -tensorVal :: TensorType a => Lens' TensorProto [a] - --- | Tensor data with the correct memory layout for tensorflow. -newtype TensorData a -TensorData :: TensorData -> TensorData a -[unTensorData] :: TensorData a -> TensorData - --- | Types that can be converted to and from TensorData. --- --- Vector is the most efficient to encode/decode for most element --- types. -class TensorType a => TensorDataType s a - --- | Decode the bytes of a TensorData into an s. -decodeTensorData :: TensorDataType s a => TensorData a -> s a - --- | Encode an s into a TensorData. --- --- The values should be in row major order, e.g., --- --- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ... -encodeTensorData :: TensorDataType s a => Shape -> s a -> TensorData a -newtype Scalar a -Scalar :: a -> Scalar a -[unScalar] :: Scalar a -> a - --- | Shape (dimensions) of a tensor. -newtype Shape -Shape :: [Int64] -> Shape -protoShape :: Lens' TensorShapeProto Shape -class Attribute a -attrLens :: Attribute a => Lens' AttrValue a -data DataType :: * -DT_INVALID :: DataType -DT_FLOAT :: DataType -DT_DOUBLE :: DataType -DT_INT32 :: DataType -DT_UINT8 :: DataType -DT_INT16 :: DataType -DT_INT8 :: DataType -DT_STRING :: DataType -DT_COMPLEX64 :: DataType -DT_INT64 :: DataType -DT_BOOL :: DataType -DT_QINT8 :: DataType -DT_QUINT8 :: DataType -DT_QINT32 :: DataType -DT_BFLOAT16 :: DataType -DT_QINT16 :: DataType -DT_QUINT16 :: DataType -DT_UINT16 :: DataType -DT_COMPLEX128 :: DataType -DT_HALF :: DataType -DT_RESOURCE :: DataType -DT_FLOAT_REF :: DataType -DT_DOUBLE_REF :: DataType -DT_INT32_REF :: DataType -DT_UINT8_REF :: DataType -DT_INT16_REF :: DataType -DT_INT8_REF :: DataType -DT_STRING_REF :: DataType -DT_COMPLEX64_REF :: DataType -DT_INT64_REF :: DataType -DT_BOOL_REF :: DataType -DT_QINT8_REF :: DataType -DT_QUINT8_REF :: DataType -DT_QINT32_REF :: DataType 
-DT_BFLOAT16_REF :: DataType -DT_QINT16_REF :: DataType -DT_QUINT16_REF :: DataType -DT_UINT16_REF :: DataType -DT_COMPLEX128_REF :: DataType -DT_HALF_REF :: DataType -DT_RESOURCE_REF :: DataType - --- | A heterogeneous list type. -data ListOf f as -Nil :: ListOf f '[] -(:/) :: f a -> ListOf f as -> ListOf f (a : as) -type List = ListOf Identity - --- | Equivalent of :/ for lists. -(/:/) :: a -> List as -> List (a : as) -data TensorTypeProxy a -TensorTypeProxy :: TensorTypeProxy a -class TensorTypes (ts :: [*]) -tensorTypes :: TensorTypes ts => TensorTypeList ts -type TensorTypeList = ListOf TensorTypeProxy -fromTensorTypeList :: TensorTypeList ts -> [DataType] -fromTensorTypes :: TensorTypes as => Proxy as -> [DataType] - --- | A Constraint specifying the possible choices of a --- TensorType. --- --- We implement a Constraint like OneOf '[Double, Float] --- a by turning the natural representation as a conjunction, i.e., --- ---
    ---   a == Double || a == Float
    ---   
    --- --- into a disjunction like --- ---
    ---   a /= Int32 && a /= Int64 && a /= ByteString && ...
    ---   
    --- --- using an enumeration of all the possible TensorTypes. -type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a) - --- | A constraint checking that two types are different. -type OneOfs ts as = (TensorTypes as, TensorTypes ts, NoneOfs (AllTensorTypes \\ ts) as) - --- | Helper types to produce a reasonable type error message when the --- Constraint "a /= a" fails. TODO(judahjacobson): Use ghc-8's --- CustomTypeErrors for this. -data TypeError a -data ExcludedCase - --- | A constraint that the type a doesn't appear in the type list --- ts. Assumes that a and each of the elements of --- ts are TensorTypes. - --- | Takes the difference of two lists of types. - --- | Removes a type from the given list of types. - --- | An enumeration of all valid TensorTypes. -type AllTensorTypes = '[Float, Double, Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool] -instance GHC.Show.Show TensorFlow.Types.Shape -instance Data.String.IsString a => Data.String.IsString (TensorFlow.Types.Scalar a) -instance GHC.Real.RealFrac a => GHC.Real.RealFrac (TensorFlow.Types.Scalar a) -instance GHC.Float.RealFloat a => GHC.Float.RealFloat (TensorFlow.Types.Scalar a) -instance GHC.Real.Real a => GHC.Real.Real (TensorFlow.Types.Scalar a) -instance GHC.Float.Floating a => GHC.Float.Floating (TensorFlow.Types.Scalar a) -instance GHC.Real.Fractional a => GHC.Real.Fractional (TensorFlow.Types.Scalar a) -instance GHC.Num.Num a => GHC.Num.Num (TensorFlow.Types.Scalar a) -instance GHC.Classes.Ord a => GHC.Classes.Ord (TensorFlow.Types.Scalar a) -instance GHC.Classes.Eq a => GHC.Classes.Eq (TensorFlow.Types.Scalar a) -instance GHC.Show.Show a => GHC.Show.Show (TensorFlow.Types.Scalar a) -instance TensorFlow.Types.TensorType GHC.Types.Float -instance TensorFlow.Types.TensorType GHC.Types.Double -instance TensorFlow.Types.TensorType GHC.Int.Int32 -instance TensorFlow.Types.TensorType GHC.Int.Int64 -instance TensorFlow.Types.TensorType GHC.Word.Word8 -instance 
TensorFlow.Types.TensorType GHC.Word.Word16 -instance TensorFlow.Types.TensorType GHC.Int.Int16 -instance TensorFlow.Types.TensorType GHC.Int.Int8 -instance TensorFlow.Types.TensorType Data.ByteString.Internal.ByteString -instance TensorFlow.Types.TensorType GHC.Types.Bool -instance TensorFlow.Types.TensorType (Data.Complex.Complex GHC.Types.Float) -instance TensorFlow.Types.TensorType (Data.Complex.Complex GHC.Types.Double) -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Types.Float -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Types.Double -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Int.Int8 -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Int.Int16 -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Int.Int32 -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Int.Int64 -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Word.Word8 -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Word.Word16 -instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Types.Bool -instance (Foreign.Storable.Storable a, TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector a) => TensorFlow.Types.TensorDataType Data.Vector.Vector a -instance TensorFlow.Types.TensorDataType Data.Vector.Vector (Data.Complex.Complex GHC.Types.Float) -instance TensorFlow.Types.TensorDataType Data.Vector.Vector (Data.Complex.Complex GHC.Types.Double) -instance TensorFlow.Types.TensorDataType Data.Vector.Vector Data.ByteString.Internal.ByteString -instance TensorFlow.Types.TensorDataType Data.Vector.Vector a => TensorFlow.Types.TensorDataType TensorFlow.Types.Scalar a -instance GHC.Exts.IsList TensorFlow.Types.Shape -instance TensorFlow.Types.Attribute GHC.Types.Float -instance TensorFlow.Types.Attribute Data.ByteString.Internal.ByteString -instance TensorFlow.Types.Attribute GHC.Int.Int64 
-instance TensorFlow.Types.Attribute Proto.Tensorflow.Core.Framework.Types.DataType -instance TensorFlow.Types.Attribute Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance TensorFlow.Types.Attribute GHC.Types.Bool -instance TensorFlow.Types.Attribute TensorFlow.Types.Shape -instance TensorFlow.Types.Attribute Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance TensorFlow.Types.Attribute [Proto.Tensorflow.Core.Framework.Types.DataType] -instance TensorFlow.Types.Attribute [GHC.Int.Int64] -instance TensorFlow.Types.All GHC.Classes.Eq (TensorFlow.Types.Map f as) => GHC.Classes.Eq (TensorFlow.Types.ListOf f as) -instance TensorFlow.Types.All GHC.Show.Show (TensorFlow.Types.Map f as) => GHC.Show.Show (TensorFlow.Types.ListOf f as) -instance TensorFlow.Types.TensorTypes '[] -instance (TensorFlow.Types.TensorType t, TensorFlow.Types.TensorTypes ts) => TensorFlow.Types.TensorTypes (t : ts) - -module TensorFlow.Output - --- | A type of graph node which has no outputs. These nodes are valuable --- for causing side effects when they are run. -newtype ControlNode -ControlNode :: NodeName -> ControlNode -[unControlNode] :: ControlNode -> NodeName - --- | A device that a node can be assigned to. There's a naming convention --- where the device names are constructed from job and replica names. -newtype Device -Device :: Text -> Device -[deviceName] :: Device -> Text - --- | The name of a node in the graph. This corresponds to the proto field --- NodeDef.name. Includes the scope prefix (if any) and a unique --- identifier (if the node was implicitly named). -newtype NodeName -NodeName :: Text -> NodeName -[unNodeName] :: NodeName -> Text - --- | Op definition. This corresponds somewhat to the NodeDef --- proto. 
-data OpDef -OpDef :: !PendingNodeName -> !OpType -> !(Map Text AttrValue) -> [Output] -> [NodeName] -> OpDef -[_opName] :: OpDef -> !PendingNodeName -[_opType] :: OpDef -> !OpType -[_opAttrs] :: OpDef -> !(Map Text AttrValue) -[_opInputs] :: OpDef -> [Output] -[_opControlInputs] :: OpDef -> [NodeName] -opName :: Lens' OpDef PendingNodeName -opType :: Lens' OpDef OpType -opAttr :: Attribute a => Text -> Lens' OpDef a -opInputs :: Lens' OpDef [Output] -opControlInputs :: Lens' OpDef [NodeName] - --- | The type of op of a node in the graph. This corresponds to the proto --- field NodeDef.op. -newtype OpType -OpType :: Text -> OpType -[unOpType] :: OpType -> Text -newtype OutputIx -OutputIx :: Int -> OutputIx -[unOutputIx] :: OutputIx -> Int - --- | An output of a TensorFlow node. -data Output -Output :: !OutputIx -> !NodeName -> Output -[outputIndex] :: Output -> !OutputIx -[outputNodeName] :: Output -> !NodeName -output :: OutputIx -> NodeName -> Output - --- | The name specified for an unrendered Op. If an Op has an ImplicitName, --- it will be assigned based on the opType plus a unique identifier. Does --- not contain the "scope" prefix. -data PendingNodeName -ExplicitName :: !Text -> PendingNodeName -ImplicitName :: PendingNodeName - --- | Opaque handle to a mutable resource in the graph. Typical such --- resources are variables. 
-newtype ResourceHandle -ResourceHandle :: Output -> ResourceHandle -instance GHC.Classes.Ord TensorFlow.Output.OpDef -instance GHC.Classes.Eq TensorFlow.Output.OpDef -instance GHC.Show.Show TensorFlow.Output.Output -instance GHC.Classes.Ord TensorFlow.Output.Output -instance GHC.Classes.Eq TensorFlow.Output.Output -instance GHC.Show.Show TensorFlow.Output.NodeName -instance GHC.Classes.Ord TensorFlow.Output.NodeName -instance GHC.Classes.Eq TensorFlow.Output.NodeName -instance GHC.Show.Show TensorFlow.Output.PendingNodeName -instance GHC.Classes.Ord TensorFlow.Output.PendingNodeName -instance GHC.Classes.Eq TensorFlow.Output.PendingNodeName -instance Data.String.IsString TensorFlow.Output.Device -instance GHC.Classes.Ord TensorFlow.Output.Device -instance GHC.Classes.Eq TensorFlow.Output.Device -instance GHC.Show.Show TensorFlow.Output.OutputIx -instance GHC.Enum.Enum TensorFlow.Output.OutputIx -instance GHC.Num.Num TensorFlow.Output.OutputIx -instance GHC.Classes.Ord TensorFlow.Output.OutputIx -instance GHC.Classes.Eq TensorFlow.Output.OutputIx -instance GHC.Show.Show TensorFlow.Output.OpType -instance GHC.Classes.Ord TensorFlow.Output.OpType -instance GHC.Classes.Eq TensorFlow.Output.OpType -instance Data.String.IsString TensorFlow.Output.OpType -instance GHC.Show.Show TensorFlow.Output.Device -instance Data.String.IsString TensorFlow.Output.PendingNodeName -instance Data.String.IsString TensorFlow.Output.Output - -module TensorFlow.Build - --- | A type of graph node which has no outputs. These nodes are valuable --- for causing side effects when they are run. 
-newtype ControlNode -ControlNode :: NodeName -> ControlNode -[unControlNode] :: ControlNode -> NodeName -data Unique -explicitName :: Text -> PendingNodeName -implicitName :: PendingNodeName -opDef :: OpType -> OpDef -opDefWithName :: PendingNodeName -> OpType -> OpDef -opName :: Lens' OpDef PendingNodeName -opType :: Lens' OpDef OpType -opAttr :: Attribute a => Text -> Lens' OpDef a -opInputs :: Lens' OpDef [Output] -opControlInputs :: Lens' OpDef [NodeName] -data GraphState -renderedNodeDefs :: Lens' GraphState (Map NodeName NodeDef) - --- | An action for building nodes in a TensorFlow graph. Used to manage --- build state internally as part of the Session monad. -data BuildT m a - --- | An action for building nodes in a TensorFlow graph. -type Build = BuildT Identity - --- | Lift a Build action into a monad, including any explicit op --- renderings. -class Monad m => MonadBuild m -build :: MonadBuild m => Build a -> m a - --- | Registers the given node to be executed before the next run. -addInitializer :: MonadBuild m => ControlNode -> m () - --- | This is Control.Monad.Morph.hoist sans the dependency. -hoistBuildT :: (forall a. m a -> n a) -> BuildT m b -> BuildT n b -evalBuildT :: Monad m => BuildT m a -> m a -runBuildT :: BuildT m a -> m (a, GraphState) - --- | Produce a GraphDef proto representation of the nodes that are rendered --- in the given Build action. -asGraphDef :: Build a -> GraphDef -addGraphDef :: MonadBuild m => GraphDef -> m () - --- | Get all the initializers that have accumulated so far, and clear that --- buffer. -flushInitializers :: Monad m => BuildT m [NodeName] - --- | Get all the NodeDefs that have accumulated so far, and clear that --- buffer. -flushNodeBuffer :: MonadBuild m => m [NodeDef] -summaries :: Lens' GraphState [Output] - --- | Render the given op if it hasn't been rendered already, and return its --- name. -getOrAddOp :: OpDef -> Build NodeName - --- | Add a new node for a given OpDef. 
This is used for making --- "stateful" ops which are not safe to dedup (e.g, "variable" and --- "assign"). -addNewOp :: OpDef -> Build NodeName - --- | Turn an Output into a string representation for the TensorFlow --- foreign APIs. -encodeOutput :: Output -> Text -lookupNode :: NodeName -> Build NodeDef - --- | Modify some part of the state, run an action, and restore the state --- after that action is done. -withStateLens :: MonadBuild m => Lens' GraphState a -> (a -> a) -> m b -> m b - --- | Set a device for all nodes rendered in the given Build action --- (unless further overridden by another use of withDevice). -withDevice :: MonadBuild m => Maybe Device -> m a -> m a - --- | Prepend a scope to all nodes rendered in the given Build --- action. -withNameScope :: MonadBuild m => Text -> m a -> m a - --- | Add control inputs to all nodes rendered in the given Build --- action. -withNodeDependencies :: MonadBuild m => Set NodeName -> m a -> m a -instance Control.Monad.Catch.MonadMask m => Control.Monad.Catch.MonadMask (TensorFlow.Build.BuildT m) -instance Control.Monad.Catch.MonadCatch m => Control.Monad.Catch.MonadCatch (TensorFlow.Build.BuildT m) -instance Control.Monad.Catch.MonadThrow m => Control.Monad.Catch.MonadThrow (TensorFlow.Build.BuildT m) -instance GHC.Base.Monad m => Control.Monad.State.Class.MonadState TensorFlow.Build.GraphState (TensorFlow.Build.BuildT m) -instance Control.Monad.Trans.Class.MonadTrans TensorFlow.Build.BuildT -instance Control.Monad.IO.Class.MonadIO m => Control.Monad.IO.Class.MonadIO (TensorFlow.Build.BuildT m) -instance GHC.Base.Monad m => GHC.Base.Monad (TensorFlow.Build.BuildT m) -instance GHC.Base.Monad m => GHC.Base.Applicative (TensorFlow.Build.BuildT m) -instance GHC.Base.Functor m => GHC.Base.Functor (TensorFlow.Build.BuildT m) -instance GHC.Classes.Ord TensorFlow.Build.PendingNode -instance GHC.Classes.Eq TensorFlow.Build.PendingNode -instance Data.String.IsString TensorFlow.Build.Scope -instance GHC.Classes.Ord 
TensorFlow.Build.Scope -instance GHC.Classes.Eq TensorFlow.Build.Scope -instance GHC.Enum.Enum TensorFlow.Build.Unique -instance GHC.Classes.Ord TensorFlow.Build.Unique -instance GHC.Classes.Eq TensorFlow.Build.Unique -instance GHC.Show.Show TensorFlow.Build.Scope -instance GHC.Base.Monad m => TensorFlow.Build.MonadBuild (TensorFlow.Build.BuildT m) - -module TensorFlow.Tensor - --- | A named output of a TensorFlow operation. --- --- The type parameter a is the type of the elements in the --- Tensor. The parameter v is either: --- --- --- --- Note that expr, value, render and --- renderValue can help convert between the different types of --- Tensor. -data Tensor v a -Tensor :: v Output -> Tensor v a -[tensorOutput] :: Tensor v a -> v Output -newtype Value a -Value :: a -> Value a -[runValue] :: Value a -> a -newtype Ref a -Ref :: a -> Ref a -[runRef] :: Ref a -> a - --- | Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op. -value :: Tensor Ref a -> Tensor Value a -renderValue :: MonadBuild m => Tensor v a -> m (Tensor Value a) - --- | A pair of a Tensor and some data that should be fed into that --- Tensor when running the graph. -data Feed -Feed :: Output -> TensorData -> Feed - --- | A class ensuring that a given tensor is rendered, i.e., has a fixed --- name, device, etc. -class TensorKind v => Rendered v -rendered :: Rendered v => v a -> a -renderedOutput :: Rendered v => Tensor v a -> Output -tensorNodeName :: Rendered v => Tensor v a -> NodeName - --- | Create a Feed for feeding the given data into a Tensor --- when running the graph. --- --- Note that if a Tensor is rendered, its identity may change; so --- feeding the rendered Tensor may be different than feeding the --- original Tensor. -feed :: Rendered v => Tensor v a -> TensorData a -> Feed - --- | Create a Tensor for a given name. This can be used to reference --- nodes in a GraphDef that was loaded via addGraphDef. --- TODO(judahjacobson): add more safety checks here. 
-tensorFromName :: TensorKind v => Text -> Tensor v a - --- | Like tensorFromName, but type-restricted to Value. -tensorValueFromName :: Text -> Tensor Value a - --- | Like tensorFromName, but type-restricted to Ref. -tensorRefFromName :: Text -> Tensor Ref a -type TensorList v = ListOf (Tensor v) -tensorListOutputs :: Rendered v => TensorList v as -> [Output] - --- | Places all nodes rendered in the given Build action on the same --- device as the given Tensor (see also withDevice). Make sure --- that the action has side effects of rendering the desired tensors. A --- pure return would not have the desired effect. -colocateWith :: (MonadBuild m, Rendered v) => Tensor v b -> m a -> m a - --- | Render a Tensor, fixing its name, scope, device and control --- inputs from the MonadBuild context. Also renders any --- dependencies of the Tensor that weren't already rendered. --- --- This operation is idempotent; calling render on the same input --- in the same context will produce the same result. However, rendering --- the same Tensor Build in two different contexts may result in --- two different Tensor Values. -render :: MonadBuild m => Tensor Build a -> m (Tensor Value a) -expr :: TensorKind v => Tensor v a -> Tensor Build a - --- | Records the given summary action in Build for retrieval with Summary --- protocol buffer in string form. For safety, use the pre-composed --- functions: Logging.scalarSummary and Logging.histogramSummary. -addSummary :: (MonadBuild m, TensorKind v) => Tensor v ByteString -> m () - --- | Retrieves the summary ops collected thus far. Typically this only --- happens once, but if buildWithSummary is used repeatedly, the --- values accumulate. -collectAllSummaries :: MonadBuild m => m [SummaryTensor] - --- | Synonym for the tensors that return serialized Summary proto. -type SummaryTensor = Tensor Value ByteString - --- | An internal class for kinds of Tensors. 
-class Monad v => TensorKind v -toBuild :: TensorKind v => v a -> Build a -instance GHC.Base.Functor TensorFlow.Tensor.Ref -instance GHC.Base.Functor TensorFlow.Tensor.Value -instance GHC.Base.Applicative TensorFlow.Tensor.Value -instance GHC.Base.Monad TensorFlow.Tensor.Value -instance GHC.Base.Applicative TensorFlow.Tensor.Ref -instance GHC.Base.Monad TensorFlow.Tensor.Ref -instance TensorFlow.Tensor.Rendered TensorFlow.Tensor.Value -instance TensorFlow.Tensor.Rendered TensorFlow.Tensor.Ref -instance TensorFlow.Tensor.TensorKind TensorFlow.Tensor.Value -instance TensorFlow.Tensor.TensorKind TensorFlow.Tensor.Ref -instance TensorFlow.Tensor.TensorKind TensorFlow.Build.Build - -module TensorFlow.BuildOp - --- | Class of types that can be used as op outputs. -class BuildResult a -buildResult :: BuildResult a => Result a -buildOp :: BuildResult a => [Int64] -> OpDef -> Build a - --- | Class of types that can be used as op outputs. -class PureResult a -pureResult :: PureResult a => ReaderT (Build OpDef) (State ResultState) a -pureOp :: PureResult a => [Int64] -> Build OpDef -> a - --- | Returns true if all the integers in each tuple are identical. Throws --- an error with a descriptive message if not. -eqLengthGuard :: [(String, [(String, Int)])] -> Bool -class BuildInputs a -buildInputs :: BuildInputs a => a -> Build [Output] - --- | Parameters to build an op (for example, the node name or optional --- attributes). TODO: be more type safe. 
-type OpParams = OpDef -> OpDef -instance GHC.Show.Show TensorFlow.BuildOp.ResultState -instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2) => TensorFlow.BuildOp.BuildResult (a1, a2) -instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3) => TensorFlow.BuildOp.BuildResult (a1, a2, a3) -instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4) -instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4, TensorFlow.BuildOp.BuildResult a5) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4, a5) -instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4, TensorFlow.BuildOp.BuildResult a5, TensorFlow.BuildOp.BuildResult a6) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4, a5, a6) -instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4, TensorFlow.BuildOp.BuildResult a5, TensorFlow.BuildOp.BuildResult a6, TensorFlow.BuildOp.BuildResult a7) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4, a5, a6, a7) -instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4, TensorFlow.BuildOp.BuildResult a5, TensorFlow.BuildOp.BuildResult a6, TensorFlow.BuildOp.BuildResult a7, TensorFlow.BuildOp.BuildResult a8) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4, a5, a6, a7, a8) -instance TensorFlow.BuildOp.BuildResult TensorFlow.Output.ResourceHandle -instance TensorFlow.Tensor.Rendered v => TensorFlow.BuildOp.BuildResult (TensorFlow.Tensor.Tensor v a) -instance TensorFlow.BuildOp.BuildResult 
TensorFlow.Output.ControlNode -instance (TensorFlow.Tensor.Rendered v, TensorFlow.Types.TensorTypes as) => TensorFlow.BuildOp.BuildResult (TensorFlow.Tensor.TensorList v as) -instance TensorFlow.BuildOp.BuildResult a => TensorFlow.BuildOp.BuildResult [a] -instance TensorFlow.BuildOp.PureResult (TensorFlow.Tensor.Tensor TensorFlow.Build.Build a) -instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2) => TensorFlow.BuildOp.PureResult (a1, a2) -instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3) => TensorFlow.BuildOp.PureResult (a1, a2, a3) -instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4) -instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4, TensorFlow.BuildOp.PureResult a5) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4, a5) -instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4, TensorFlow.BuildOp.PureResult a5, TensorFlow.BuildOp.PureResult a6) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4, a5, a6) -instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4, TensorFlow.BuildOp.PureResult a5, TensorFlow.BuildOp.PureResult a6, TensorFlow.BuildOp.PureResult a7) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4, a5, a6, a7) -instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4, TensorFlow.BuildOp.PureResult a5, TensorFlow.BuildOp.PureResult a6, TensorFlow.BuildOp.PureResult a7, TensorFlow.BuildOp.PureResult a8) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4, a5, a6, a7, a8) 
-instance TensorFlow.BuildOp.PureResult a => TensorFlow.BuildOp.PureResult [a] -instance TensorFlow.Types.TensorTypes as => TensorFlow.BuildOp.PureResult (TensorFlow.Tensor.TensorList TensorFlow.Build.Build as) -instance TensorFlow.BuildOp.BuildInputs a => TensorFlow.BuildOp.BuildInputs [a] -instance TensorFlow.BuildOp.BuildInputs (TensorFlow.Tensor.Tensor v a) -instance TensorFlow.BuildOp.BuildInputs (TensorFlow.Types.ListOf (TensorFlow.Tensor.Tensor v) as) -instance TensorFlow.BuildOp.BuildInputs TensorFlow.Output.ResourceHandle - -module TensorFlow.Nodes - --- | Types that contain ops which can be run. -class Nodes t -getNodes :: Nodes t => t -> Build (Set NodeName) - --- | Types that tensor representations (e.g. Tensor, --- ControlNode) can be fetched into. --- --- Includes collections of tensors (e.g. tuples). -class Nodes t => Fetchable t a -getFetch :: Fetchable t a => t -> Build (Fetch a) - --- | Fetch action. Keeps track of what needs to be fetched and how to --- decode the fetched data. -data Fetch a -Fetch :: Set Text -> (Map Text TensorData -> a) -> Fetch a - --- | Nodes to fetch -[fetches] :: Fetch a -> Set Text - --- | Function to create an a from the fetched data. 
-[fetchRestore] :: Fetch a -> Map Text TensorData -> a -nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b -fetchTensorVector :: (TensorType a) => Tensor v a -> Build (Fetch (TensorData a)) -instance GHC.Base.Functor TensorFlow.Nodes.Fetch -instance GHC.Base.Applicative TensorFlow.Nodes.Fetch -instance (TensorFlow.Nodes.Nodes t1, TensorFlow.Nodes.Nodes t2) => TensorFlow.Nodes.Nodes (t1, t2) -instance (TensorFlow.Nodes.Nodes t1, TensorFlow.Nodes.Nodes t2, TensorFlow.Nodes.Nodes t3) => TensorFlow.Nodes.Nodes (t1, t2, t3) -instance (TensorFlow.Nodes.Fetchable t1 a1, TensorFlow.Nodes.Fetchable t2 a2) => TensorFlow.Nodes.Fetchable (t1, t2) (a1, a2) -instance (TensorFlow.Nodes.Fetchable t1 a1, TensorFlow.Nodes.Fetchable t2 a2, TensorFlow.Nodes.Fetchable t3 a3) => TensorFlow.Nodes.Fetchable (t1, t2, t3) (a1, a2, a3) -instance TensorFlow.Nodes.Nodes t => TensorFlow.Nodes.Nodes [t] -instance TensorFlow.Nodes.Fetchable t a => TensorFlow.Nodes.Fetchable [t] [a] -instance TensorFlow.Nodes.Nodes TensorFlow.Output.ControlNode -instance (a ~ ()) => TensorFlow.Nodes.Fetchable TensorFlow.Output.ControlNode a -instance TensorFlow.Nodes.Nodes (TensorFlow.Types.ListOf f '[]) -instance (TensorFlow.Nodes.Nodes (f a), TensorFlow.Nodes.Nodes (TensorFlow.Types.ListOf f as)) => TensorFlow.Nodes.Nodes (TensorFlow.Types.ListOf f (a : as)) -instance (l ~ TensorFlow.Types.List '[]) => TensorFlow.Nodes.Fetchable (TensorFlow.Types.ListOf f '[]) l -instance (TensorFlow.Nodes.Fetchable (f t) a, TensorFlow.Nodes.Fetchable (TensorFlow.Types.ListOf f ts) (TensorFlow.Types.List as), i ~ Data.Functor.Identity.Identity) => TensorFlow.Nodes.Fetchable (TensorFlow.Types.ListOf f (t : ts)) (TensorFlow.Types.ListOf i (a : as)) -instance TensorFlow.Nodes.Nodes (TensorFlow.Tensor.Tensor v a) -instance (TensorFlow.Types.TensorType a, a ~ a') => TensorFlow.Nodes.Fetchable (TensorFlow.Tensor.Tensor v a) (TensorFlow.Types.TensorData a') -instance (TensorFlow.Types.TensorType a, 
TensorFlow.Types.TensorDataType s a, a ~ a') => TensorFlow.Nodes.Fetchable (TensorFlow.Tensor.Tensor v a) (s a') - -module TensorFlow.ControlFlow - --- | Modify a Build action, such that all new ops rendered in it --- will depend on the nodes in the first argument. -withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a - --- | Create an op that groups multiple operations. --- --- When this op finishes, all ops in the input n have finished. --- This op has no output. -group :: (MonadBuild m, Nodes t) => t -> m ControlNode - --- | Does nothing. Only useful as a placeholder for control edges. -noOp :: MonadBuild m => m ControlNode - -module TensorFlow.Session -data Session a - --- | Customization for session. Use the lenses to update: --- sessionTarget, sessionTracer, sessionConfig. -data Options - --- | Uses the specified config for the created session. -sessionConfig :: Lens' Options ConfigProto - --- | Target can be: "local", ip:port, host:port. The set of supported --- factories depends on the linked in libraries. -sessionTarget :: Lens' Options ByteString - --- | Uses the given logger to monitor session progress. -sessionTracer :: Lens' Options Tracer - --- | Run Session actions in a new TensorFlow session. -runSession :: Session a -> IO a - --- | Run Session actions in a new TensorFlow session created with --- the given option setter actions (sessionTarget, --- sessionConfig). -runSessionWithOptions :: Options -> Session a -> IO a - --- | Lift a Build action into a monad, including any explicit op --- renderings. -class Monad m => MonadBuild m -build :: MonadBuild m => Build a -> m a - --- | Add all pending rendered nodes to the TensorFlow graph and runs any --- pending initializers. --- --- Note that run, runWithFeeds, etc. will all call this function --- implicitly. 
-extend :: Session () -addGraphDef :: MonadBuild m => GraphDef -> m () - --- | Run a subgraph t, rendering any dependent nodes that aren't --- already rendered, and fetch the corresponding values for a. -run :: Fetchable t a => t -> Session a - --- | Run a subgraph t, rendering any dependent nodes that aren't --- already rendered, feed the given input values, and fetch the --- corresponding result values for a. -runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a - --- | Run a subgraph t, rendering and extending any dependent nodes --- that aren't already rendered. This behaves like run except that --- it doesn't do any fetches. -run_ :: Nodes t => t -> Session () - --- | Run a subgraph t, rendering any dependent nodes that aren't --- already rendered, feed the given input values, and fetch the --- corresponding result values for a. This behaves like --- runWithFeeds except that it doesn't do any fetches. -runWithFeeds_ :: Nodes t => [Feed] -> t -> Session () - --- | Starts a concurrent thread which evaluates the given Nodes forever --- until runSession exits or an exception occurs. Graph extension happens --- synchronously, but the resultant run proceeds as a separate thread. -asyncProdNodes :: Nodes t => t -> Session () -instance Control.Monad.Catch.MonadMask TensorFlow.Session.Session -instance Control.Monad.Catch.MonadCatch TensorFlow.Session.Session -instance Control.Monad.Catch.MonadThrow TensorFlow.Session.Session -instance Control.Monad.IO.Class.MonadIO TensorFlow.Session.Session -instance GHC.Base.Monad TensorFlow.Session.Session -instance GHC.Base.Applicative TensorFlow.Session.Session -instance GHC.Base.Functor TensorFlow.Session.Session -instance Data.Default.Class.Default TensorFlow.Session.Options -instance TensorFlow.Build.MonadBuild TensorFlow.Session.Session - - --- | The core functionality of TensorFlow. --- --- Unless you are defining ops, you do not need to import other modules --- from this package. 
--- --- Basic ops are provided in the tensorflow-ops and tensorflow-core-ops --- packages. -module TensorFlow.Core -data Session a - --- | Customization for session. Use the lenses to update: --- sessionTarget, sessionTracer, sessionConfig. -data Options - --- | Uses the specified config for the created session. -sessionConfig :: Lens' Options ConfigProto - --- | Target can be: "local", ip:port, host:port. The set of supported --- factories depends on the linked in libraries. -sessionTarget :: Lens' Options ByteString - --- | Uses the given logger to monitor session progress. -sessionTracer :: Lens' Options Tracer - --- | Run Session actions in a new TensorFlow session. -runSession :: Session a -> IO a - --- | Run Session actions in a new TensorFlow session created with --- the given option setter actions (sessionTarget, --- sessionConfig). -runSessionWithOptions :: Options -> Session a -> IO a - --- | Lift a Build action into a monad, including any explicit op --- renderings. -class Monad m => MonadBuild m -build :: MonadBuild m => Build a -> m a - --- | Types that tensor representations (e.g. Tensor, --- ControlNode) can be fetched into. --- --- Includes collections of tensors (e.g. tuples). -class Nodes t => Fetchable t a - --- | Types that contain ops which can be run. -class Nodes t - --- | Run a subgraph t, rendering any dependent nodes that aren't --- already rendered, and fetch the corresponding values for a. -run :: Fetchable t a => t -> Session a - --- | Run a subgraph t, rendering and extending any dependent nodes --- that aren't already rendered. This behaves like run except that --- it doesn't do any fetches. -run_ :: Nodes t => t -> Session () - --- | A pair of a Tensor and some data that should be fed into that --- Tensor when running the graph. -data Feed - --- | Create a Feed for feeding the given data into a Tensor --- when running the graph. 
--- --- Note that if a Tensor is rendered, its identity may change; so --- feeding the rendered Tensor may be different than feeding the --- original Tensor. -feed :: Rendered v => Tensor v a -> TensorData a -> Feed - --- | Run a subgraph t, rendering any dependent nodes that aren't --- already rendered, feed the given input values, and fetch the --- corresponding result values for a. -runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a - --- | Run a subgraph t, rendering any dependent nodes that aren't --- already rendered, feed the given input values, and fetch the --- corresponding result values for a. This behaves like --- runWithFeeds except that it doesn't do any fetches. -runWithFeeds_ :: Nodes t => [Feed] -> t -> Session () - --- | Starts a concurrent thread which evaluates the given Nodes forever --- until runSession exits or an exception occurs. Graph extension happens --- synchronously, but the resultant run proceeds as a separate thread. -asyncProdNodes :: Nodes t => t -> Session () - --- | An action for building nodes in a TensorFlow graph. -type Build = BuildT Identity - --- | An action for building nodes in a TensorFlow graph. Used to manage --- build state internally as part of the Session monad. -data BuildT m a - --- | Render a Tensor, fixing its name, scope, device and control --- inputs from the MonadBuild context. Also renders any --- dependencies of the Tensor that weren't already rendered. --- --- This operation is idempotent; calling render on the same input --- in the same context will produce the same result. However, rendering --- the same Tensor Build in two different contexts may result in --- two different Tensor Values. -render :: MonadBuild m => Tensor Build a -> m (Tensor Value a) - --- | Produce a GraphDef proto representation of the nodes that are rendered --- in the given Build action. 
-asGraphDef :: Build a -> GraphDef -addGraphDef :: MonadBuild m => GraphDef -> m () -opName :: Lens' OpDef PendingNodeName -opAttr :: Attribute a => Text -> Lens' OpDef a - --- | A type of graph node which has no outputs. These nodes are valuable --- for causing side effects when they are run. -data ControlNode - --- | A named output of a TensorFlow operation. --- --- The type parameter a is the type of the elements in the --- Tensor. The parameter v is either: --- --- --- --- Note that expr, value, render and --- renderValue can help convert between the different types of --- Tensor. -data Tensor v a -data Value a -data Ref a - --- | Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op. -value :: Tensor Ref a -> Tensor Value a - --- | Create a Tensor for a given name. This can be used to reference --- nodes in a GraphDef that was loaded via addGraphDef. --- TODO(judahjacobson): add more safety checks here. -tensorFromName :: TensorKind v => Text -> Tensor v a -expr :: TensorKind v => Tensor v a -> Tensor Build a - --- | The class of scalar types supported by tensorflow. -class TensorType a - --- | Tensor data with the correct memory layout for tensorflow. -data TensorData a - --- | Types that can be converted to and from TensorData. --- --- Vector is the most efficient to encode/decode for most element --- types. -class TensorType a => TensorDataType s a - --- | Decode the bytes of a TensorData into an s. -decodeTensorData :: TensorDataType s a => TensorData a -> s a - --- | Encode an s into a TensorData. --- --- The values should be in row major order, e.g., --- --- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ... -encodeTensorData :: TensorDataType s a => Shape -> s a -> TensorData a -newtype Scalar a -Scalar :: a -> Scalar a -[unScalar] :: Scalar a -> a - --- | Shape (dimensions) of a tensor. -newtype Shape -Shape :: [Int64] -> Shape - --- | A Constraint specifying the possible choices of a --- TensorType. 
--- --- We implement a Constraint like OneOf '[Double, Float] --- a by turning the natural representation as a conjunction, i.e., --- ---
    ---   a == Double || a == Float
    ---   
    --- --- into a disjunction like --- ---
    ---   a /= Int32 && a /= Int64 && a /= ByteString && ...
    ---   
    --- --- using an enumeration of all the possible TensorTypes. -type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a) - --- | A constraint checking that two types are different. - --- | Places all nodes rendered in the given Build action on the same --- device as the given Tensor (see also withDevice). Make sure --- that the action has side effects of rendering the desired tensors. A --- pure return would not have the desired effect. -colocateWith :: (MonadBuild m, Rendered v) => Tensor v b -> m a -> m a - --- | A device that a node can be assigned to. There's a naming convention --- where the device names are constructed from job and replica names. -newtype Device -Device :: Text -> Device -[deviceName] :: Device -> Text - --- | Set a device for all nodes rendered in the given Build action --- (unless further overridden by another use of withDevice). -withDevice :: MonadBuild m => Maybe Device -> m a -> m a - --- | Prepend a scope to all nodes rendered in the given Build --- action. -withNameScope :: MonadBuild m => Text -> m a -> m a - --- | Modify a Build action, such that all new ops rendered in it --- will depend on the nodes in the first argument. -withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a - --- | Create an op that groups multiple operations. --- --- When this op finishes, all ops in the input n have finished. --- This op has no output. -group :: (MonadBuild m, Nodes t) => t -> m ControlNode - --- | Does nothing. Only useful as a placeholder for control edges. -noOp :: MonadBuild m => m ControlNode diff --git a/docs/haddock/tensorflow-0.1.0.2/LICENSE b/docs/haddock/tensorflow-0.1.0.2/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. 
+ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Build.html new file mode 100644 index 0000000..8840257 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Build.html @@ -0,0 +1,14 @@ +TensorFlow.Build

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Build

    Graph node types

    newtype ControlNode Source #

    A type of graph node which has no outputs. These nodes are + valuable for causing side effects when they are run.

    Constructors

    ControlNode 

    Ops

    opAttr :: Attribute a => Text -> Lens' OpDef a Source #

    The Build monad

    data GraphState Source #

    Instances

    Monad m => MonadState GraphState (BuildT m) Source # 

    Methods

    get :: BuildT m GraphState

    put :: GraphState -> BuildT m ()

    state :: (GraphState -> (a, GraphState)) -> BuildT m a

    data BuildT m a Source #

    An action for building nodes in a TensorFlow graph. + Used to manage build state internally as part of the Session monad.

    Instances

    MonadTrans BuildT Source # 

    Methods

    lift :: Monad m => m a -> BuildT m a #

    TensorKind Build Source # 

    Methods

    toBuild :: Build a -> Build a Source #

    Monad m => MonadState GraphState (BuildT m) Source # 

    Methods

    get :: BuildT m GraphState

    put :: GraphState -> BuildT m ()

    state :: (GraphState -> (a, GraphState)) -> BuildT m a

    Monad m => Monad (BuildT m) Source # 

    Methods

    (>>=) :: BuildT m a -> (a -> BuildT m b) -> BuildT m b #

    (>>) :: BuildT m a -> BuildT m b -> BuildT m b #

    return :: a -> BuildT m a #

    fail :: String -> BuildT m a #

    Functor m => Functor (BuildT m) Source # 

    Methods

    fmap :: (a -> b) -> BuildT m a -> BuildT m b #

    (<$) :: a -> BuildT m b -> BuildT m a #

    MonadFix m => MonadFix (BuildT m) Source # 

    Methods

    mfix :: (a -> BuildT m a) -> BuildT m a #

    Monad m => Applicative (BuildT m) Source # 

    Methods

    pure :: a -> BuildT m a #

    (<*>) :: BuildT m (a -> b) -> BuildT m a -> BuildT m b #

    (*>) :: BuildT m a -> BuildT m b -> BuildT m b #

    (<*) :: BuildT m a -> BuildT m b -> BuildT m a #

    MonadIO m => MonadIO (BuildT m) Source # 

    Methods

    liftIO :: IO a -> BuildT m a #

    MonadThrow m => MonadThrow (BuildT m) Source # 

    Methods

    throwM :: Exception e => e -> BuildT m a

    MonadMask m => MonadMask (BuildT m) Source # 

    Methods

    mask :: ((forall a. BuildT m a -> BuildT m a) -> BuildT m b) -> BuildT m b

    uninterruptibleMask :: ((forall a. BuildT m a -> BuildT m a) -> BuildT m b) -> BuildT m b

    MonadCatch m => MonadCatch (BuildT m) Source # 

    Methods

    catch :: Exception e => BuildT m a -> (e -> BuildT m a) -> BuildT m a

    Monad m => MonadBuild (BuildT m) Source # 

    Methods

    build :: Build a -> BuildT m a Source #

    TensorTypes as => PureResult (TensorList Build as) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (TensorList Build as) Source #

    PureResult (Tensor Build a) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (Tensor Build a) Source #

    type Build = BuildT Identity Source #

    An action for building nodes in a TensorFlow graph.

    class Monad m => MonadBuild m where Source #

    Lift a Build action into a monad, including any explicit op renderings.

    Minimal complete definition

    build

    Methods

    build :: Build a -> m a Source #

    Instances

    Monad m => MonadBuild (BuildT m) Source # 

    Methods

    build :: Build a -> BuildT m a Source #

    Monad m => MonadBuild (SessionT m) Source # 

    Methods

    build :: Build a -> SessionT m a Source #

    addInitializer :: MonadBuild m => ControlNode -> m () Source #

    Registers the given node to be executed before the next + run.

    hoistBuildT :: (forall a. m a -> n a) -> BuildT m b -> BuildT n b Source #

    This is Control.Monad.Morph.hoist sans the dependency.

    evalBuildT :: Monad m => BuildT m a -> m a Source #

    asGraphDef :: Build a -> GraphDef Source #

    Produce a GraphDef proto representation of the nodes that are rendered in + the given Build action.

    flushInitializers :: Monad m => BuildT m [NodeName] Source #

    Get all the initializers that have accumulated so far, and clear + that buffer.

    flushNodeBuffer :: MonadBuild m => m [NodeDef] Source #

    Get all the NodeDefs that have accumulated so far, and clear that buffer.

    Creating and looking up Ops

    getOrAddOp :: OpDef -> Build NodeName Source #

    Render the given op if it hasn't been rendered already, and return its + name.

    addNewOp :: OpDef -> Build NodeName Source #

    Add a new node for a given OpDef. This is used for making "stateful" ops + which are not safe to dedup (e.g, "variable" and "assign").

    encodeOutput :: Output -> Text Source #

    Turn an Output into a string representation for the TensorFlow + foreign APIs.

    Modifying all nodes in a Build action

    withStateLens :: MonadBuild m => Lens' GraphState a -> (a -> a) -> m b -> m b Source #

    Modify some part of the state, run an action, and restore the state + after that action is done.

    withDevice :: MonadBuild m => Maybe Device -> m a -> m a Source #

    Set a device for all nodes rendered in the given Build action + (unless further overridden by another use of withDevice).

    withNameScope :: MonadBuild m => Text -> m a -> m a Source #

    Prepend a scope to all nodes rendered in the given Build action.

    withNodeDependencies :: MonadBuild m => Set NodeName -> m a -> m a Source #

    Add control inputs to all nodes rendered in the given Build action.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-BuildOp.html new file mode 100644 index 0000000..eb3d404 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-BuildOp.html @@ -0,0 +1,6 @@ +TensorFlow.BuildOp

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.BuildOp

    Synopsis

    Documentation

    class BuildResult a where Source #

    Class of types that can be used as op outputs.

    Minimal complete definition

    buildResult

    Methods

    buildResult :: Result a Source #

    Instances

    BuildResult ControlNode Source # 

    Methods

    buildResult :: Result ControlNode Source #

    BuildResult a => BuildResult [a] Source # 

    Methods

    buildResult :: Result [a] Source #

    (BuildResult a1, BuildResult a2) => BuildResult (a1, a2) Source # 

    Methods

    buildResult :: Result (a1, a2) Source #

    (TensorKind v, Rendered (Tensor v), TensorTypes as) => BuildResult (TensorList v as) Source # 

    Methods

    buildResult :: Result (TensorList v as) Source #

    (TensorKind v, Rendered (Tensor v)) => BuildResult (Tensor v a) Source # 

    Methods

    buildResult :: Result (Tensor v a) Source #

    (BuildResult a1, BuildResult a2, BuildResult a3) => BuildResult (a1, a2, a3) Source # 

    Methods

    buildResult :: Result (a1, a2, a3) Source #

    (BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4) => BuildResult (a1, a2, a3, a4) Source # 

    Methods

    buildResult :: Result (a1, a2, a3, a4) Source #

    (BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5) => BuildResult (a1, a2, a3, a4, a5) Source # 

    Methods

    buildResult :: Result (a1, a2, a3, a4, a5) Source #

    (BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6) => BuildResult (a1, a2, a3, a4, a5, a6) Source # 

    Methods

    buildResult :: Result (a1, a2, a3, a4, a5, a6) Source #

    (BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6, BuildResult a7) => BuildResult (a1, a2, a3, a4, a5, a6, a7) Source # 

    Methods

    buildResult :: Result (a1, a2, a3, a4, a5, a6, a7) Source #

    (BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6, BuildResult a7, BuildResult a8) => BuildResult (a1, a2, a3, a4, a5, a6, a7, a8) Source # 

    Methods

    buildResult :: Result (a1, a2, a3, a4, a5, a6, a7, a8) Source #

    class PureResult a where Source #

    Class of types that can be used as op outputs.

    Minimal complete definition

    pureResult

    Methods

    pureResult :: ReaderT (Build OpDef) (State ResultState) a Source #

    Instances

    PureResult a => PureResult [a] Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) [a] Source #

    (PureResult a1, PureResult a2) => PureResult (a1, a2) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (a1, a2) Source #

    TensorTypes as => PureResult (TensorList Build as) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (TensorList Build as) Source #

    PureResult (Tensor Build a) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (Tensor Build a) Source #

    (PureResult a1, PureResult a2, PureResult a3) => PureResult (a1, a2, a3) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (a1, a2, a3) Source #

    (PureResult a1, PureResult a2, PureResult a3, PureResult a4) => PureResult (a1, a2, a3, a4) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (a1, a2, a3, a4) Source #

    (PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5) => PureResult (a1, a2, a3, a4, a5) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (a1, a2, a3, a4, a5) Source #

    (PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6) => PureResult (a1, a2, a3, a4, a5, a6) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (a1, a2, a3, a4, a5, a6) Source #

    (PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6, PureResult a7) => PureResult (a1, a2, a3, a4, a5, a6, a7) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (a1, a2, a3, a4, a5, a6, a7) Source #

    (PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6, PureResult a7, PureResult a8) => PureResult (a1, a2, a3, a4, a5, a6, a7, a8) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (a1, a2, a3, a4, a5, a6, a7, a8) Source #

    eqLengthGuard :: [(String, [(String, Int)])] -> Bool Source #

    Returns true if all the integers in each tuple are identical. + Throws an error with a descriptive message if not.

    class BuildInputs a where Source #

    Minimal complete definition

    buildInputs

    Methods

    buildInputs :: a -> Build [Output] Source #

    Instances

    type OpParams = OpDef -> OpDef Source #

    Parameters to build an op (for example, the node name or optional attributes). + TODO: be more type safe.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-ControlFlow.html new file mode 100644 index 0000000..67444b3 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-ControlFlow.html @@ -0,0 +1,6 @@ +TensorFlow.ControlFlow

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.ControlFlow

    Synopsis

    Dependencies

    withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a Source #

    Modify a Build action, such that all new ops rendered in it will depend + on the nodes in the first argument.

    group :: (MonadBuild m, Nodes t) => t -> m ControlNode Source #

    Create an op that groups multiple operations.

    When this op finishes, all ops in the input n have finished. This op has + no output.

    Operations

    noOp :: MonadBuild m => m ControlNode Source #

    Does nothing. Only useful as a placeholder for control edges.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Core.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Core.html new file mode 100644 index 0000000..3536f28 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Core.html @@ -0,0 +1,50 @@ +TensorFlow.Core

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Core

    Description

    The core functionality of TensorFlow.

    Unless you are defining ops, you do not need to import other modules from + this package.

    Basic ops are provided in the tensorflow-ops and tensorflow-core-ops + packages.

    Synopsis

    Session

    data Options Source #

    Customization for session. Use the lenses to update: + sessionTarget, sessionTracer, sessionConfig.

    Instances

    Default Options Source # 

    Methods

    def :: Options

    sessionConfig :: Lens' Options ConfigProto Source #

    Uses the specified config for the created session.

    sessionTarget :: Lens' Options ByteString Source #

    Target can be: "local", ip:port, host:port. + The set of supported factories depends on the linked in libraries.

    sessionTracer :: Lens' Options Tracer Source #

    Uses the given logger to monitor session progress.

    runSession :: (MonadMask m, MonadIO m) => SessionT m a -> m a Source #

    Run Session actions in a new TensorFlow session.

    runSessionWithOptions :: (MonadMask m, MonadIO m) => Options -> SessionT m a -> m a Source #

    Run Session actions in a new TensorFlow session created with + the given option setter actions (sessionTarget, sessionConfig).

    Building graphs

    class Monad m => MonadBuild m where Source #

    Lift a Build action into a monad, including any explicit op renderings.

    Minimal complete definition

    build

    Methods

    build :: Build a -> m a Source #

    Instances

    Monad m => MonadBuild (BuildT m) Source # 

    Methods

    build :: Build a -> BuildT m a Source #

    Monad m => MonadBuild (SessionT m) Source # 

    Methods

    build :: Build a -> SessionT m a Source #

    Running graphs

    class Nodes t => Fetchable t a Source #

    Types that tensor representations (e.g. Tensor, ControlNode) can be + fetched into.

    Includes collections of tensors (e.g. tuples).

    Minimal complete definition

    getFetch

    Instances

    (~) * a () => Fetchable ControlNode a Source # 
    Fetchable t a => Fetchable [t] [a] Source # 

    Methods

    getFetch :: [t] -> Build (Fetch [a]) Source #

    Fetchable t a => Fetchable (Maybe t) (Maybe a) Source # 

    Methods

    getFetch :: Maybe t -> Build (Fetch (Maybe a)) Source #

    (~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l Source # 

    Methods

    getFetch :: ListOf f [*] -> Build (Fetch l) Source #

    (TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') Source # 

    Methods

    getFetch :: Tensor v a -> Build (Fetch (s a')) Source #

    (TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') Source # 

    Methods

    getFetch :: Tensor v a -> Build (Fetch (TensorData a')) Source #

    (Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) Source # 

    Methods

    getFetch :: (t1, t2) -> Build (Fetch (a1, a2)) Source #

    (Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) Source # 

    Methods

    getFetch :: ListOf f ((* ': t) ts) -> Build (Fetch (ListOf i ((* ': a) as))) Source #

    (Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) Source # 

    Methods

    getFetch :: (t1, t2, t3) -> Build (Fetch (a1, a2, a3)) Source #

    class Nodes t Source #

    Types that contain ops which can be run.

    Minimal complete definition

    getNodes

    Instances

    Nodes ControlNode Source # 
    Nodes t => Nodes [t] Source # 

    Methods

    getNodes :: [t] -> Build (Set NodeName) Source #

    Nodes t => Nodes (Maybe t) Source # 
    (Nodes t1, Nodes t2) => Nodes (t1, t2) Source # 

    Methods

    getNodes :: (t1, t2) -> Build (Set NodeName) Source #

    (Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) Source # 

    Methods

    getNodes :: ListOf f ((* ': a) as) -> Build (Set NodeName) Source #

    Nodes (ListOf f ([] *)) Source # 

    Methods

    getNodes :: ListOf f [*] -> Build (Set NodeName) Source #

    Nodes (Tensor v a) Source # 

    Methods

    getNodes :: Tensor v a -> Build (Set NodeName) Source #

    (Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) Source # 

    Methods

    getNodes :: (t1, t2, t3) -> Build (Set NodeName) Source #

    run :: (MonadIO m, Fetchable t a) => t -> SessionT m a Source #

    Run a subgraph t, rendering any dependent nodes that aren't already + rendered, and fetch the corresponding values for a.

    run_ :: (MonadIO m, Nodes t) => t -> SessionT m () Source #

    Run a subgraph t, rendering and extending any dependent nodes that aren't + already rendered. This behaves like run except that it doesn't do any + fetches.

    data Feed Source #

    A pair of a Tensor and some data that should be fed into that Tensor + when running the graph.

    feed :: Rendered t => t a -> TensorData a -> Feed Source #

    Create a Feed for feeding the given data into a Tensor when running + the graph.

    Note that if a Tensor is rendered, its identity may change; so feeding the + rendered Tensor may be different than feeding the original Tensor.

    runWithFeeds :: (MonadIO m, Fetchable t a) => [Feed] -> t -> SessionT m a Source #

    Run a subgraph t, rendering any dependent nodes that aren't already + rendered, feed the given input values, and fetch the corresponding result + values for a.

    runWithFeeds_ :: (MonadIO m, Nodes t) => [Feed] -> t -> SessionT m () Source #

    Run a subgraph t, rendering any dependent nodes that aren't already + rendered, feed the given input values, and fetch the corresponding result + values for a. This behaves like runWithFeeds except that it doesn't do + any fetches.

    Async

    asyncProdNodes Source #

    Arguments

    :: (MonadIO m, Nodes t) 
    => t

    Node to evaluate concurrently.

    -> SessionT m () 

    Starts a concurrent thread which evaluates the given Nodes + forever until runSession exits or an exception occurs. Graph + extension happens synchronously, but the resultant run proceeds as + a separate thread.

    Build

    type Build = BuildT Identity Source #

    An action for building nodes in a TensorFlow graph.

    data BuildT m a Source #

    An action for building nodes in a TensorFlow graph. + Used to manage build state internally as part of the Session monad.

    Instances

    MonadTrans BuildT Source # 

    Methods

    lift :: Monad m => m a -> BuildT m a #

    TensorKind Build Source # 

    Methods

    toBuild :: Build a -> Build a Source #

    Monad m => MonadState GraphState (BuildT m) Source # 

    Methods

    get :: BuildT m GraphState

    put :: GraphState -> BuildT m ()

    state :: (GraphState -> (a, GraphState)) -> BuildT m a

    Monad m => Monad (BuildT m) Source # 

    Methods

    (>>=) :: BuildT m a -> (a -> BuildT m b) -> BuildT m b #

    (>>) :: BuildT m a -> BuildT m b -> BuildT m b #

    return :: a -> BuildT m a #

    fail :: String -> BuildT m a #

    Functor m => Functor (BuildT m) Source # 

    Methods

    fmap :: (a -> b) -> BuildT m a -> BuildT m b #

    (<$) :: a -> BuildT m b -> BuildT m a #

    MonadFix m => MonadFix (BuildT m) Source # 

    Methods

    mfix :: (a -> BuildT m a) -> BuildT m a #

    Monad m => Applicative (BuildT m) Source # 

    Methods

    pure :: a -> BuildT m a #

    (<*>) :: BuildT m (a -> b) -> BuildT m a -> BuildT m b #

    (*>) :: BuildT m a -> BuildT m b -> BuildT m b #

    (<*) :: BuildT m a -> BuildT m b -> BuildT m a #

    MonadIO m => MonadIO (BuildT m) Source # 

    Methods

    liftIO :: IO a -> BuildT m a #

    MonadThrow m => MonadThrow (BuildT m) Source # 

    Methods

    throwM :: Exception e => e -> BuildT m a

    MonadMask m => MonadMask (BuildT m) Source # 

    Methods

    mask :: ((forall a. BuildT m a -> BuildT m a) -> BuildT m b) -> BuildT m b

    uninterruptibleMask :: ((forall a. BuildT m a -> BuildT m a) -> BuildT m b) -> BuildT m b

    MonadCatch m => MonadCatch (BuildT m) Source # 

    Methods

    catch :: Exception e => BuildT m a -> (e -> BuildT m a) -> BuildT m a

    Monad m => MonadBuild (BuildT m) Source # 

    Methods

    build :: Build a -> BuildT m a Source #

    TensorTypes as => PureResult (TensorList Build as) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (TensorList Build as) Source #

    PureResult (Tensor Build a) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (Tensor Build a) Source #

    render :: MonadBuild m => Tensor Build a -> m (Tensor Value a) Source #

    Render a Tensor, fixing its name, scope, device and control inputs from + the MonadBuild context. Also renders any dependencies of the Tensor that + weren't already rendered.

    This operation is idempotent; calling render on the same input in the same + context will produce the same result. However, rendering the same + Tensor Build in two different contexts may result in two different + Tensor Values.

    asGraphDef :: Build a -> GraphDef Source #

    Produce a GraphDef proto representation of the nodes that are rendered in + the given Build action.

    opAttr :: Attribute a => Text -> Lens' OpDef a Source #

    addInitializer :: MonadBuild m => ControlNode -> m () Source #

    Registers the given node to be executed before the next + run.

    Tensor

    data ControlNode Source #

    A type of graph node which has no outputs. These nodes are + valuable for causing side effects when they are run.

    data Tensor v a Source #

    A named output of a TensorFlow operation.

    The type parameter a is the type of the elements in the Tensor. The + parameter v is either:

    • Build: An unrendered, immutable value.
    • Value: A rendered, immutable value.
    • Ref: A rendered stateful handle (e.g., a variable).

    Note that expr, value, render and renderValue can help convert between + the different types of Tensor.

    Instances

    data Value a Source #

    Instances

    Monad Value Source # 

    Methods

    (>>=) :: Value a -> (a -> Value b) -> Value b #

    (>>) :: Value a -> Value b -> Value b #

    return :: a -> Value a #

    fail :: String -> Value a #

    Functor Value Source # 

    Methods

    fmap :: (a -> b) -> Value a -> Value b #

    (<$) :: a -> Value b -> Value a #

    Applicative Value Source # 

    Methods

    pure :: a -> Value a #

    (<*>) :: Value (a -> b) -> Value a -> Value b #

    (*>) :: Value a -> Value b -> Value b #

    (<*) :: Value a -> Value b -> Value a #

    TensorKind Value Source # 

    Methods

    toBuild :: Value a -> Build a Source #

    Rendered (Tensor Value) Source # 

    data Ref a Source #

    Instances

    Monad Ref Source # 

    Methods

    (>>=) :: Ref a -> (a -> Ref b) -> Ref b #

    (>>) :: Ref a -> Ref b -> Ref b #

    return :: a -> Ref a #

    fail :: String -> Ref a #

    Functor Ref Source # 

    Methods

    fmap :: (a -> b) -> Ref a -> Ref b #

    (<$) :: a -> Ref b -> Ref a #

    Applicative Ref Source # 

    Methods

    pure :: a -> Ref a #

    (<*>) :: Ref (a -> b) -> Ref a -> Ref b #

    (*>) :: Ref a -> Ref b -> Ref b #

    (<*) :: Ref a -> Ref b -> Ref a #

    TensorKind Ref Source # 

    Methods

    toBuild :: Ref a -> Build a Source #

    Rendered (Tensor Ref) Source # 

    value :: Tensor Ref a -> Tensor Value a Source #

    Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op.

    tensorFromName :: TensorKind v => Text -> Tensor v a Source #

    Create a Tensor for a given name. This can be used to reference nodes + in a GraphDef that was loaded via addGraphDef. + TODO(judahjacobson): add more safety checks here.

    Element types

    class TensorType a Source #

    The class of scalar types supported by tensorflow.

    Minimal complete definition

    tensorType, tensorRefType, tensorVal

    Instances

    TensorType Bool Source # 
    TensorType Double Source # 
    TensorType Float Source # 
    TensorType Int8 Source # 
    TensorType Int16 Source # 
    TensorType Int32 Source # 
    TensorType Int64 Source # 
    TensorType Word8 Source # 
    TensorType Word16 Source # 
    TensorType ByteString Source # 
    TensorType ResourceHandle Source # 
    TensorType (Complex Double) Source # 
    TensorType (Complex Float) Source # 

    data TensorData a Source #

    Tensor data with the correct memory layout for tensorflow.

    Instances

    (TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') Source # 

    Methods

    getFetch :: Tensor v a -> Build (Fetch (TensorData a')) Source #

    class TensorType a => TensorDataType s a where Source #

    Types that can be converted to and from TensorData.

    Vector is the most efficient to encode/decode for most element types.

    Minimal complete definition

    decodeTensorData, encodeTensorData

    Methods

    decodeTensorData :: TensorData a -> s a Source #

    Decode the bytes of a TensorData into an s.

    encodeTensorData :: Shape -> s a -> TensorData a Source #

    Encode an s into a TensorData.

    The values should be in row major order, e.g.,

    element 0: index (0, ..., 0) + element 1: index (0, ..., 1) + ...

    Instances

    TensorDataType Vector Bool Source # 
    TensorDataType Vector Double Source # 
    TensorDataType Vector Float Source # 
    TensorDataType Vector Int8 Source # 
    TensorDataType Vector Int16 Source # 
    TensorDataType Vector Int32 Source # 
    TensorDataType Vector Int64 Source # 
    TensorDataType Vector Word8 Source # 
    TensorDataType Vector Word16 Source # 
    (Storable a, TensorDataType Vector a, TensorType a) => TensorDataType Vector a Source # 

    Methods

    decodeTensorData :: TensorData a -> Vector a Source #

    encodeTensorData :: Shape -> Vector a -> TensorData a Source #

    TensorDataType Vector ByteString Source # 
    (TensorDataType Vector a, TensorType a) => TensorDataType Scalar a Source # 
    TensorDataType Vector (Complex Double) Source # 
    TensorDataType Vector (Complex Float) Source # 

    newtype Scalar a Source #

    Constructors

    Scalar 

    Fields

    Instances

    (TensorDataType Vector a, TensorType a) => TensorDataType Scalar a Source # 
    Eq a => Eq (Scalar a) Source # 

    Methods

    (==) :: Scalar a -> Scalar a -> Bool #

    (/=) :: Scalar a -> Scalar a -> Bool #

    Floating a => Floating (Scalar a) Source # 

    Methods

    pi :: Scalar a #

    exp :: Scalar a -> Scalar a #

    log :: Scalar a -> Scalar a #

    sqrt :: Scalar a -> Scalar a #

    (**) :: Scalar a -> Scalar a -> Scalar a #

    logBase :: Scalar a -> Scalar a -> Scalar a #

    sin :: Scalar a -> Scalar a #

    cos :: Scalar a -> Scalar a #

    tan :: Scalar a -> Scalar a #

    asin :: Scalar a -> Scalar a #

    acos :: Scalar a -> Scalar a #

    atan :: Scalar a -> Scalar a #

    sinh :: Scalar a -> Scalar a #

    cosh :: Scalar a -> Scalar a #

    tanh :: Scalar a -> Scalar a #

    asinh :: Scalar a -> Scalar a #

    acosh :: Scalar a -> Scalar a #

    atanh :: Scalar a -> Scalar a #

    log1p :: Scalar a -> Scalar a #

    expm1 :: Scalar a -> Scalar a #

    log1pexp :: Scalar a -> Scalar a #

    log1mexp :: Scalar a -> Scalar a #

    Fractional a => Fractional (Scalar a) Source # 

    Methods

    (/) :: Scalar a -> Scalar a -> Scalar a #

    recip :: Scalar a -> Scalar a #

    fromRational :: Rational -> Scalar a #

    Num a => Num (Scalar a) Source # 

    Methods

    (+) :: Scalar a -> Scalar a -> Scalar a #

    (-) :: Scalar a -> Scalar a -> Scalar a #

    (*) :: Scalar a -> Scalar a -> Scalar a #

    negate :: Scalar a -> Scalar a #

    abs :: Scalar a -> Scalar a #

    signum :: Scalar a -> Scalar a #

    fromInteger :: Integer -> Scalar a #

    Ord a => Ord (Scalar a) Source # 

    Methods

    compare :: Scalar a -> Scalar a -> Ordering #

    (<) :: Scalar a -> Scalar a -> Bool #

    (<=) :: Scalar a -> Scalar a -> Bool #

    (>) :: Scalar a -> Scalar a -> Bool #

    (>=) :: Scalar a -> Scalar a -> Bool #

    max :: Scalar a -> Scalar a -> Scalar a #

    min :: Scalar a -> Scalar a -> Scalar a #

    Real a => Real (Scalar a) Source # 

    Methods

    toRational :: Scalar a -> Rational #

    RealFloat a => RealFloat (Scalar a) Source # 
    RealFrac a => RealFrac (Scalar a) Source # 

    Methods

    properFraction :: Integral b => Scalar a -> (b, Scalar a) #

    truncate :: Integral b => Scalar a -> b #

    round :: Integral b => Scalar a -> b #

    ceiling :: Integral b => Scalar a -> b #

    floor :: Integral b => Scalar a -> b #

    Show a => Show (Scalar a) Source # 

    Methods

    showsPrec :: Int -> Scalar a -> ShowS #

    show :: Scalar a -> String #

    showList :: [Scalar a] -> ShowS #

    IsString a => IsString (Scalar a) Source # 

    Methods

    fromString :: String -> Scalar a #

    newtype Shape Source #

    Shape (dimensions) of a tensor.

    Constructors

    Shape [Int64] 

    Instances

    IsList Shape Source # 

    Associated Types

    type Item Shape :: * #

    Show Shape Source # 

    Methods

    showsPrec :: Int -> Shape -> ShowS #

    show :: Shape -> String #

    showList :: [Shape] -> ShowS #

    Attribute Shape Source # 

    Methods

    attrLens :: Lens' AttrValue Shape Source #

    type Item Shape Source # 

    type OneOf ts a = (TensorType a, TensorTypes' ts, NoneOf (AllTensorTypes \\ ts) a) Source #

    A Constraint specifying the possible choices of a TensorType.

    We implement a Constraint like OneOf '[Double, Float] a by turning the + natural representation as a conjunction, i.e.,

       a == Double || a == Float
    +

    into a disjunction like

        a /= Int32 && a /= Int64 && a /= ByteString && ...
    +

    using an enumeration of all the possible TensorTypes.

    type family a /= b :: Constraint where ... Source #

    A constraint checking that two types are different.

    Equations

    a /= a = TypeError a ~ ExcludedCase 
    a /= b = () 

    Op combinators

    colocateWith :: (MonadBuild m, Rendered t) => t b -> m a -> m a Source #

    Places all nodes rendered in the given Build action on the same + device as the given Tensor (see also withDevice). Make sure that + the action has side effects of rendering the desired tensors. A pure + return would not have the desired effect.

    newtype Device Source #

    A device that a node can be assigned to. + There's a naming convention where the device names + are constructed from job and replica names.

    Constructors

    Device 

    Fields

    withDevice :: MonadBuild m => Maybe Device -> m a -> m a Source #

    Set a device for all nodes rendered in the given Build action + (unless further overridden by another use of withDevice).

    withNameScope :: MonadBuild m => Text -> m a -> m a Source #

    Prepend a scope to all nodes rendered in the given Build action.

    Dependencies

    withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a Source #

    Modify a Build action, such that all new ops rendered in it will depend + on the nodes in the first argument.

    group :: (MonadBuild m, Nodes t) => t -> m ControlNode Source #

    Create an op that groups multiple operations.

    When this op finishes, all ops in the input n have finished. This op has + no output.

    Misc

    noOp :: MonadBuild m => m ControlNode Source #

    Does nothing. Only useful as a placeholder for control edges.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Internal-FFI.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Internal-FFI.html new file mode 100644 index 0000000..2ff8dab --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Internal-FFI.html @@ -0,0 +1,8 @@ +TensorFlow.Internal.FFI

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Internal.FFI

    Synopsis

    Documentation

    withSession Source #

    Arguments

    :: (MonadIO m, MonadMask m) 
    => (SessionOptions -> IO ()) 
    -> ((IO () -> IO ()) -> Session -> m a)

    The action can spawn concurrent tasks which will + be canceled before withSession returns.

    -> m a 

    Runs the given action after creating a session with options + populated by the given optionSetter.

    run Source #

    Arguments

    :: Session 
    -> [(ByteString, TensorData)]

    Feeds.

    -> [ByteString]

    Fetches.

    -> [ByteString]

    Targets.

    -> IO [TensorData] 

    data TensorData Source #

    All of the data needed to represent a tensor.

    setSessionConfig :: ConfigProto -> SessionOptions -> IO () Source #

    setSessionTarget :: ByteString -> SessionOptions -> IO () Source #

    getAllOpList :: IO ByteString Source #

    Returns the serialized OpList of all OpDefs defined in this + address space.

    Internal helper.

    useProtoAsVoidPtrLen :: (Message msg, Integral c, Show c, Bits c) => msg -> (Ptr b -> c -> IO a) -> IO a Source #

    Serializes the given msg and provides it as (ptr,len) argument + to the given action.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Internal-VarInt.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Internal-VarInt.html new file mode 100644 index 0000000..3be01b3 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Internal-VarInt.html @@ -0,0 +1,4 @@ +TensorFlow.Internal.VarInt

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellSafe
    LanguageHaskell2010

    TensorFlow.Internal.VarInt

    Description

    Originally taken from internal proto-lens code.

    Synopsis

    Documentation

    getVarInt :: Parser Word64 Source #

    Decode an unsigned varint.

    putVarInt :: Word64 -> Builder Source #

    Encode a Word64.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Nodes.html new file mode 100644 index 0000000..029f756 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Nodes.html @@ -0,0 +1,6 @@ +TensorFlow.Nodes

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Nodes

    Synopsis

    Documentation

    class Nodes t where Source #

    Types that contain ops which can be run.

    Minimal complete definition

    getNodes

    Methods

    getNodes :: t -> Build (Set NodeName) Source #

    Instances

    Nodes ControlNode Source # 
    Nodes t => Nodes [t] Source # 

    Methods

    getNodes :: [t] -> Build (Set NodeName) Source #

    Nodes t => Nodes (Maybe t) Source # 
    (Nodes t1, Nodes t2) => Nodes (t1, t2) Source # 

    Methods

    getNodes :: (t1, t2) -> Build (Set NodeName) Source #

    (Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) Source # 

    Methods

    getNodes :: ListOf f ((* ': a) as) -> Build (Set NodeName) Source #

    Nodes (ListOf f ([] *)) Source # 

    Methods

    getNodes :: ListOf f [*] -> Build (Set NodeName) Source #

    Nodes (Tensor v a) Source # 

    Methods

    getNodes :: Tensor v a -> Build (Set NodeName) Source #

    (Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) Source # 

    Methods

    getNodes :: (t1, t2, t3) -> Build (Set NodeName) Source #

    class Nodes t => Fetchable t a where Source #

    Types that tensor representations (e.g. Tensor, ControlNode) can be + fetched into.

    Includes collections of tensors (e.g. tuples).

    Minimal complete definition

    getFetch

    Methods

    getFetch :: t -> Build (Fetch a) Source #

    Instances

    (~) * a () => Fetchable ControlNode a Source # 
    Fetchable t a => Fetchable [t] [a] Source # 

    Methods

    getFetch :: [t] -> Build (Fetch [a]) Source #

    Fetchable t a => Fetchable (Maybe t) (Maybe a) Source # 

    Methods

    getFetch :: Maybe t -> Build (Fetch (Maybe a)) Source #

    (~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l Source # 

    Methods

    getFetch :: ListOf f [*] -> Build (Fetch l) Source #

    (TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') Source # 

    Methods

    getFetch :: Tensor v a -> Build (Fetch (s a')) Source #

    (TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') Source # 

    Methods

    getFetch :: Tensor v a -> Build (Fetch (TensorData a')) Source #

    (Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) Source # 

    Methods

    getFetch :: (t1, t2) -> Build (Fetch (a1, a2)) Source #

    (Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) Source # 

    Methods

    getFetch :: ListOf f ((* ': t) ts) -> Build (Fetch (ListOf i ((* ': a) as))) Source #

    (Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) Source # 

    Methods

    getFetch :: (t1, t2, t3) -> Build (Fetch (a1, a2, a3)) Source #

    data Fetch a Source #

    Fetch action. Keeps track of what needs to be fetched and how to decode + the fetched data.

    Constructors

    Fetch 

    Fields

    Instances

    Functor Fetch Source # 

    Methods

    fmap :: (a -> b) -> Fetch a -> Fetch b #

    (<$) :: a -> Fetch b -> Fetch a #

    Applicative Fetch Source # 

    Methods

    pure :: a -> Fetch a #

    (<*>) :: Fetch (a -> b) -> Fetch a -> Fetch b #

    (*>) :: Fetch a -> Fetch b -> Fetch b #

    (<*) :: Fetch a -> Fetch b -> Fetch a #

    nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b Source #

    fetchTensorVector :: forall a v. TensorType a => Tensor v a -> Build (Fetch (TensorData a)) Source #

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Output.html new file mode 100644 index 0000000..51fa3ab --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Output.html @@ -0,0 +1,12 @@ +TensorFlow.Output

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Output

    Contents

    Documentation

    newtype ControlNode Source #

    A type of graph node which has no outputs. These nodes are + valuable for causing side effects when they are run.

    Constructors

    ControlNode 

    newtype Device Source #

    A device that a node can be assigned to. + There's a naming convention where the device names + are constructed from job and replica names.

    Constructors

    Device 

    Fields

    Ops

    newtype NodeName Source #

    The name of a node in the graph. This corresponds to the proto field + NodeDef.name. Includes the scope prefix (if any) and a unique identifier + (if the node was implicitly named).

    Constructors

    NodeName 

    Fields

    data OpDef Source #

    Op definition. This corresponds somewhat to the NodeDef proto.

    Instances

    Eq OpDef Source # 

    Methods

    (==) :: OpDef -> OpDef -> Bool #

    (/=) :: OpDef -> OpDef -> Bool #

    Ord OpDef Source # 

    Methods

    compare :: OpDef -> OpDef -> Ordering #

    (<) :: OpDef -> OpDef -> Bool #

    (<=) :: OpDef -> OpDef -> Bool #

    (>) :: OpDef -> OpDef -> Bool #

    (>=) :: OpDef -> OpDef -> Bool #

    max :: OpDef -> OpDef -> OpDef #

    min :: OpDef -> OpDef -> OpDef #

    opAttr :: Attribute a => Text -> Lens' OpDef a Source #

    newtype OpType Source #

    The type of op of a node in the graph. This corresponds to the proto field + NodeDef.op.

    Constructors

    OpType 

    Fields

    data Output Source #

    An output of a TensorFlow node.

    Constructors

    Output 
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Session.html new file mode 100644 index 0000000..ed4bb55 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Session.html @@ -0,0 +1,19 @@ +TensorFlow.Session

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Session

    Synopsis

    Documentation

    data SessionT m a Source #

    Instances

    MonadTrans SessionT Source # 

    Methods

    lift :: Monad m => m a -> SessionT m a #

    Monad m => Monad (SessionT m) Source # 

    Methods

    (>>=) :: SessionT m a -> (a -> SessionT m b) -> SessionT m b #

    (>>) :: SessionT m a -> SessionT m b -> SessionT m b #

    return :: a -> SessionT m a #

    fail :: String -> SessionT m a #

    Functor m => Functor (SessionT m) Source # 

    Methods

    fmap :: (a -> b) -> SessionT m a -> SessionT m b #

    (<$) :: a -> SessionT m b -> SessionT m a #

    Monad m => Applicative (SessionT m) Source # 

    Methods

    pure :: a -> SessionT m a #

    (<*>) :: SessionT m (a -> b) -> SessionT m a -> SessionT m b #

    (*>) :: SessionT m a -> SessionT m b -> SessionT m b #

    (<*) :: SessionT m a -> SessionT m b -> SessionT m a #

    MonadIO m => MonadIO (SessionT m) Source # 

    Methods

    liftIO :: IO a -> SessionT m a #

    MonadThrow m => MonadThrow (SessionT m) Source # 

    Methods

    throwM :: Exception e => e -> SessionT m a

    MonadMask m => MonadMask (SessionT m) Source # 

    Methods

    mask :: ((forall a. SessionT m a -> SessionT m a) -> SessionT m b) -> SessionT m b

    uninterruptibleMask :: ((forall a. SessionT m a -> SessionT m a) -> SessionT m b) -> SessionT m b

    MonadCatch m => MonadCatch (SessionT m) Source # 

    Methods

    catch :: Exception e => SessionT m a -> (e -> SessionT m a) -> SessionT m a

    Monad m => MonadBuild (SessionT m) Source # 

    Methods

    build :: Build a -> SessionT m a Source #

    data Options Source #

    Customization for session. Use the lenses to update: + sessionTarget, sessionTracer, sessionConfig.

    Instances

    Default Options Source # 

    Methods

    def :: Options

    sessionConfig :: Lens' Options ConfigProto Source #

    Uses the specified config for the created session.

    sessionTarget :: Lens' Options ByteString Source #

    Target can be: "local", ip:port, host:port. + The set of supported factories depends on the linked in libraries.

    sessionTracer :: Lens' Options Tracer Source #

    Uses the given logger to monitor session progress.

    runSession :: (MonadMask m, MonadIO m) => SessionT m a -> m a Source #

    Run Session actions in a new TensorFlow session.

    runSessionWithOptions :: (MonadMask m, MonadIO m) => Options -> SessionT m a -> m a Source #

    Run Session actions in a new TensorFlow session created with + the given option setter actions (sessionTarget, sessionConfig).

    class Monad m => MonadBuild m where Source #

    Lift a Build action into a monad, including any explicit op renderings.

    Minimal complete definition

    build

    Methods

    build :: Build a -> m a Source #

    Instances

    Monad m => MonadBuild (BuildT m) Source # 

    Methods

    build :: Build a -> BuildT m a Source #

    Monad m => MonadBuild (SessionT m) Source # 

    Methods

    build :: Build a -> SessionT m a Source #

    extend :: MonadIO m => SessionT m () Source #

    Add all pending rendered nodes to the TensorFlow graph and runs + any pending initializers.

    Note that run, runWithFeeds, etc. will all call this function implicitly.

    run :: (MonadIO m, Fetchable t a) => t -> SessionT m a Source #

    Run a subgraph t, rendering any dependent nodes that aren't already + rendered, and fetch the corresponding values for a.

    runWithFeeds :: (MonadIO m, Fetchable t a) => [Feed] -> t -> SessionT m a Source #

    Run a subgraph t, rendering any dependent nodes that aren't already + rendered, feed the given input values, and fetch the corresponding result + values for a.

    run_ :: (MonadIO m, Nodes t) => t -> SessionT m () Source #

    Run a subgraph t, rendering and extending any dependent nodes that aren't + already rendered. This behaves like run except that it doesn't do any + fetches.

    runWithFeeds_ :: (MonadIO m, Nodes t) => [Feed] -> t -> SessionT m () Source #

    Run a subgraph t, rendering any dependent nodes that aren't already + rendered, feed the given input values, and fetch the corresponding result + values for a. This behaves like runWithFeeds except that it doesn't do + any fetches.

    asyncProdNodes Source #

    Arguments

    :: (MonadIO m, Nodes t) 
    => t

    Node to evaluate concurrently.

    -> SessionT m () 

    Starts a concurrent thread which evaluates the given Nodes + forever until runSession exits or an exception occurs. Graph + extension happens synchronously, but the resultant run proceeds as + a separate thread.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Tensor.html new file mode 100644 index 0000000..72b46f3 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Tensor.html @@ -0,0 +1,25 @@ +TensorFlow.Tensor

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Tensor

    Synopsis

    Documentation

    data Tensor v a where Source #

    A named output of a TensorFlow operation.

    The type parameter a is the type of the elements in the Tensor. The + parameter v is either:

    • Build: An unrendered, immutable value.
    • Value: A rendered, immutable value.
    • Ref: A rendered stateful handle (e.g., a variable).

    Note that expr, value, render and renderValue can help convert between + the different types of Tensor.

    Constructors

    Tensor :: TensorKind v => {..} -> Tensor v a 

    Fields

    Instances

    newtype Value a Source #

    Constructors

    Value 

    Fields

    Instances

    Monad Value Source # 

    Methods

    (>>=) :: Value a -> (a -> Value b) -> Value b #

    (>>) :: Value a -> Value b -> Value b #

    return :: a -> Value a #

    fail :: String -> Value a #

    Functor Value Source # 

    Methods

    fmap :: (a -> b) -> Value a -> Value b #

    (<$) :: a -> Value b -> Value a #

    Applicative Value Source # 

    Methods

    pure :: a -> Value a #

    (<*>) :: Value (a -> b) -> Value a -> Value b #

    (*>) :: Value a -> Value b -> Value b #

    (<*) :: Value a -> Value b -> Value a #

    TensorKind Value Source # 

    Methods

    toBuild :: Value a -> Build a Source #

    Rendered (Tensor Value) Source # 

    newtype Ref a Source #

    Constructors

    Ref 

    Fields

    Instances

    Monad Ref Source # 

    Methods

    (>>=) :: Ref a -> (a -> Ref b) -> Ref b #

    (>>) :: Ref a -> Ref b -> Ref b #

    return :: a -> Ref a #

    fail :: String -> Ref a #

    Functor Ref Source # 

    Methods

    fmap :: (a -> b) -> Ref a -> Ref b #

    (<$) :: a -> Ref b -> Ref a #

    Applicative Ref Source # 

    Methods

    pure :: a -> Ref a #

    (<*>) :: Ref (a -> b) -> Ref a -> Ref b #

    (*>) :: Ref a -> Ref b -> Ref b #

    (<*) :: Ref a -> Ref b -> Ref a #

    TensorKind Ref Source # 

    Methods

    toBuild :: Ref a -> Build a Source #

    Rendered (Tensor Ref) Source # 

    value :: Tensor Ref a -> Tensor Value a Source #

    Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op.

    data Feed Source #

    A pair of a Tensor and some data that should be fed into that Tensor + when running the graph.

    Constructors

    Feed Output TensorData 

    class Rendered t where Source #

    A class ensuring that a given tensor is rendered, i.e., has a fixed + name, device, etc.

    Minimal complete definition

    renderedOutput

    Methods

    renderedOutput :: t a -> Output Source #

    feed :: Rendered t => t a -> TensorData a -> Feed Source #

    Create a Feed for feeding the given data into a Tensor when running + the graph.

    Note that if a Tensor is rendered, its identity may change; so feeding the + rendered Tensor may be different than feeding the original Tensor.

    tensorFromName :: TensorKind v => Text -> Tensor v a Source #

    Create a Tensor for a given name. This can be used to reference nodes + in a GraphDef that was loaded via addGraphDef. + TODO(judahjacobson): add more safety checks here.

    tensorValueFromName :: Text -> Tensor Value a Source #

    Like tensorFromName, but type-restricted to Value.

    tensorRefFromName :: Text -> Tensor Ref a Source #

    Like tensorFromName, but type-restricted to Ref.

    colocateWith :: (MonadBuild m, Rendered t) => t b -> m a -> m a Source #

    Places all nodes rendered in the given Build action on the same + device as the given Tensor (see also withDevice). Make sure that + the action has side effects of rendering the desired tensors. A pure + return would not have the desired effect.

    render :: MonadBuild m => Tensor Build a -> m (Tensor Value a) Source #

    Render a Tensor, fixing its name, scope, device and control inputs from + the MonadBuild context. Also renders any dependencies of the Tensor that + weren't already rendered.

    This operation is idempotent; calling render on the same input in the same + context will produce the same result. However, rendering the same + Tensor Build in two different contexts may result in two different + Tensor Values.

    addSummary Source #

    Arguments

    :: (MonadBuild m, TensorKind v) 
    => Tensor v ByteString

    A SummaryTensor

    -> m () 

    Records the given summary action in Build for retrieval with + Summary protocol buffer in string form. For safety, use the + pre-composed functions: Logging.scalarSummary and + Logging.histogramSummary.

    collectAllSummaries :: MonadBuild m => m [SummaryTensor] Source #

    Retrieves the summary ops collected thus far. Typically this only + happens once, but if buildWithSummary is used + repeatedly, the values accumulate.

    type SummaryTensor = Tensor Value ByteString Source #

    Synonym for the tensors that return serialized Summary proto.

    class Monad v => TensorKind v where Source #

    An internal class for kinds of Tensors.

    Minimal complete definition

    toBuild

    Methods

    toBuild :: v a -> Build a Source #

    Instances

    class ToTensor t where Source #

    Types which can be converted to Tensor.

    Minimal complete definition

    toTensor

    Methods

    toTensor :: TensorType a => t a -> Tensor Build a Source #

    Instances

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Types.html new file mode 100644 index 0000000..201ba44 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/TensorFlow-Types.html @@ -0,0 +1,12 @@ +TensorFlow.Types

    tensorflow-0.1.0.2: TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Types

    Synopsis

    Documentation

    class TensorType a where Source #

    The class of scalar types supported by tensorflow.

    Minimal complete definition

    tensorType, tensorRefType, tensorVal

    Instances

    TensorType Bool Source # 
    TensorType Double Source # 
    TensorType Float Source # 
    TensorType Int8 Source # 
    TensorType Int16 Source # 
    TensorType Int32 Source # 
    TensorType Int64 Source # 
    TensorType Word8 Source # 
    TensorType Word16 Source # 
    TensorType ByteString Source # 
    TensorType ResourceHandle Source # 
    TensorType (Complex Double) Source # 
    TensorType (Complex Float) Source # 

    newtype TensorData a Source #

    Tensor data with the correct memory layout for tensorflow.

    Constructors

    TensorData 

    Instances

    (TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') Source # 

    Methods

    getFetch :: Tensor v a -> Build (Fetch (TensorData a')) Source #

    class TensorType a => TensorDataType s a where Source #

    Types that can be converted to and from TensorData.

    Vector is the most efficient to encode/decode for most element types.

    Minimal complete definition

    decodeTensorData, encodeTensorData

    Methods

    decodeTensorData :: TensorData a -> s a Source #

    Decode the bytes of a TensorData into an s.

    encodeTensorData :: Shape -> s a -> TensorData a Source #

    Encode an s into a TensorData.

    The values should be in row major order, e.g.,

    element 0: index (0, ..., 0) + element 1: index (0, ..., 1) + ...

    Instances

    TensorDataType Vector Bool Source # 
    TensorDataType Vector Double Source # 
    TensorDataType Vector Float Source # 
    TensorDataType Vector Int8 Source # 
    TensorDataType Vector Int16 Source # 
    TensorDataType Vector Int32 Source # 
    TensorDataType Vector Int64 Source # 
    TensorDataType Vector Word8 Source # 
    TensorDataType Vector Word16 Source # 
    (Storable a, TensorDataType Vector a, TensorType a) => TensorDataType Vector a Source # 

    Methods

    decodeTensorData :: TensorData a -> Vector a Source #

    encodeTensorData :: Shape -> Vector a -> TensorData a Source #

    TensorDataType Vector ByteString Source # 
    (TensorDataType Vector a, TensorType a) => TensorDataType Scalar a Source # 
    TensorDataType Vector (Complex Double) Source # 
    TensorDataType Vector (Complex Float) Source # 

    newtype Scalar a Source #

    Constructors

    Scalar 

    Fields

    Instances

    (TensorDataType Vector a, TensorType a) => TensorDataType Scalar a Source # 
    Eq a => Eq (Scalar a) Source # 

    Methods

    (==) :: Scalar a -> Scalar a -> Bool #

    (/=) :: Scalar a -> Scalar a -> Bool #

    Floating a => Floating (Scalar a) Source # 

    Methods

    pi :: Scalar a #

    exp :: Scalar a -> Scalar a #

    log :: Scalar a -> Scalar a #

    sqrt :: Scalar a -> Scalar a #

    (**) :: Scalar a -> Scalar a -> Scalar a #

    logBase :: Scalar a -> Scalar a -> Scalar a #

    sin :: Scalar a -> Scalar a #

    cos :: Scalar a -> Scalar a #

    tan :: Scalar a -> Scalar a #

    asin :: Scalar a -> Scalar a #

    acos :: Scalar a -> Scalar a #

    atan :: Scalar a -> Scalar a #

    sinh :: Scalar a -> Scalar a #

    cosh :: Scalar a -> Scalar a #

    tanh :: Scalar a -> Scalar a #

    asinh :: Scalar a -> Scalar a #

    acosh :: Scalar a -> Scalar a #

    atanh :: Scalar a -> Scalar a #

    log1p :: Scalar a -> Scalar a #

    expm1 :: Scalar a -> Scalar a #

    log1pexp :: Scalar a -> Scalar a #

    log1mexp :: Scalar a -> Scalar a #

    Fractional a => Fractional (Scalar a) Source # 

    Methods

    (/) :: Scalar a -> Scalar a -> Scalar a #

    recip :: Scalar a -> Scalar a #

    fromRational :: Rational -> Scalar a #

    Num a => Num (Scalar a) Source # 

    Methods

    (+) :: Scalar a -> Scalar a -> Scalar a #

    (-) :: Scalar a -> Scalar a -> Scalar a #

    (*) :: Scalar a -> Scalar a -> Scalar a #

    negate :: Scalar a -> Scalar a #

    abs :: Scalar a -> Scalar a #

    signum :: Scalar a -> Scalar a #

    fromInteger :: Integer -> Scalar a #

    Ord a => Ord (Scalar a) Source # 

    Methods

    compare :: Scalar a -> Scalar a -> Ordering #

    (<) :: Scalar a -> Scalar a -> Bool #

    (<=) :: Scalar a -> Scalar a -> Bool #

    (>) :: Scalar a -> Scalar a -> Bool #

    (>=) :: Scalar a -> Scalar a -> Bool #

    max :: Scalar a -> Scalar a -> Scalar a #

    min :: Scalar a -> Scalar a -> Scalar a #

    Real a => Real (Scalar a) Source # 

    Methods

    toRational :: Scalar a -> Rational #

    RealFloat a => RealFloat (Scalar a) Source # 
    RealFrac a => RealFrac (Scalar a) Source # 

    Methods

    properFraction :: Integral b => Scalar a -> (b, Scalar a) #

    truncate :: Integral b => Scalar a -> b #

    round :: Integral b => Scalar a -> b #

    ceiling :: Integral b => Scalar a -> b #

    floor :: Integral b => Scalar a -> b #

    Show a => Show (Scalar a) Source # 

    Methods

    showsPrec :: Int -> Scalar a -> ShowS #

    show :: Scalar a -> String #

    showList :: [Scalar a] -> ShowS #

    IsString a => IsString (Scalar a) Source # 

    Methods

    fromString :: String -> Scalar a #

    newtype Shape Source #

    Shape (dimensions) of a tensor.

    Constructors

    Shape [Int64] 

    Instances

    IsList Shape Source # 

    Associated Types

    type Item Shape :: * #

    Show Shape Source # 

    Methods

    showsPrec :: Int -> Shape -> ShowS #

    show :: Shape -> String #

    showList :: [Shape] -> ShowS #

    Attribute Shape Source # 

    Methods

    attrLens :: Lens' AttrValue Shape Source #

    type Item Shape Source # 

    data DataType :: * #

    Lists

    data ListOf f as where Source #

    A heterogeneous list type.

    Constructors

    Nil :: ListOf f '[] 
    (:/) :: f a -> ListOf f as -> ListOf f (a ': as) infixr 5 

    Instances

    All Eq (Map f as) => Eq (ListOf f as) Source # 

    Methods

    (==) :: ListOf f as -> ListOf f as -> Bool #

    (/=) :: ListOf f as -> ListOf f as -> Bool #

    All Show (Map f as) => Show (ListOf f as) Source # 

    Methods

    showsPrec :: Int -> ListOf f as -> ShowS #

    show :: ListOf f as -> String #

    showList :: [ListOf f as] -> ShowS #

    BuildInputs (ListOf (Tensor v) as) Source # 

    Methods

    buildInputs :: ListOf (Tensor v) as -> Build [Output] Source #

    TensorTypes as => PureResult (TensorList Build as) Source # 

    Methods

    pureResult :: ReaderT * (Build OpDef) (State ResultState) (TensorList Build as) Source #

    (TensorKind v, Rendered (Tensor v), TensorTypes as) => BuildResult (TensorList v as) Source # 

    Methods

    buildResult :: Result (TensorList v as) Source #

    (Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) Source # 

    Methods

    getNodes :: ListOf f ((* ': a) as) -> Build (Set NodeName) Source #

    Nodes (ListOf f ([] *)) Source # 

    Methods

    getNodes :: ListOf f [*] -> Build (Set NodeName) Source #

    (~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l Source # 

    Methods

    getFetch :: ListOf f [*] -> Build (Fetch l) Source #

    (Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) Source # 

    Methods

    getFetch :: ListOf f ((* ': t) ts) -> Build (Fetch (ListOf i ((* ': a) as))) Source #

    (/:/) :: a -> List as -> List (a ': as) infixr 5 Source #

    Equivalent of :/ for lists.

    class TensorTypes ts where Source #

    Minimal complete definition

    tensorTypes

    Instances

    TensorTypes ([] *) Source # 
    (TensorType t, TensorTypes ts) => TensorTypes ((:) * t ts) Source #

    A constraint that the input is a list of TensorTypes.

    Methods

    tensorTypes :: TensorTypeList ((* ': t) ts) Source #

    fromTensorTypes :: forall as. TensorTypes as => Proxy as -> [DataType] Source #

    Type constraints

    type OneOf ts a = (TensorType a, TensorTypes' ts, NoneOf (AllTensorTypes \\ ts) a) Source #

    A Constraint specifying the possible choices of a TensorType.

    We implement a Constraint like OneOf '[Double, Float] a by turning the + natural representation as a conjunction, i.e.,

       a == Double || a == Float
    +

    into a disjunction like

        a /= Int32 && a /= Int64 && a /= ByteString && ...
    +

    using an enumeration of all the possible TensorTypes.

    type family a /= b :: Constraint where ... Source #

    A constraint checking that two types are different.

    Equations

    a /= a = TypeError a ~ ExcludedCase 
    a /= b = () 

    type OneOfs ts as = (TensorTypes as, TensorTypes' ts, NoneOfs (AllTensorTypes \\ ts) as) Source #

    Implementation of constraints

    data TypeError a Source #

    Helper types to produce a reasonable type error message when the Constraint + "a /= a" fails. + TODO(judahjacobson): Use ghc-8's CustomTypeErrors for this.

    type family NoneOf ts a :: Constraint where ... Source #

    A constraint that the type a doesn't appear in the type list ts. + Assumes that a and each of the elements of ts are TensorTypes.

    Equations

    NoneOf (t1 ': (t2 ': (t3 ': (t4 ': ts)))) a = (a /= t1, a /= t2, a /= t3, a /= t4, NoneOf ts a) 
    NoneOf (t1 ': (t2 ': (t3 ': ts))) a = (a /= t1, a /= t2, a /= t3, NoneOf ts a) 
    NoneOf (t1 ': (t2 ': ts)) a = (a /= t1, a /= t2, NoneOf ts a) 
    NoneOf (t1 ': ts) a = (a /= t1, NoneOf ts a) 
    NoneOf '[] a = () 

    type family as \\ bs where ... Source #

    Takes the difference of two lists of types.

    Equations

    as \\ '[] = as 
    as \\ (b ': bs) = Delete b as \\ bs 

    type family Delete a as where ... Source #

    Removes a type from the given list of types.

    Equations

    Delete a '[] = '[] 
    Delete a (a ': as) = Delete a as 
    Delete a (b ': as) = b ': Delete a as 

    type AllTensorTypes = '[Float, Double, Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool] Source #

    An enumeration of all valid TensorTypes.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-47.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-47.html similarity index 87% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-47.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-47.html index c1d8049..b01c196 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-47.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-47.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - /)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-58.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-58.html similarity index 85% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-58.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-58.html index 3691430..5bb16d0 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-58.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-58.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - :)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - :

    :/TensorFlow.Types
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - :

    :/TensorFlow.Types
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-92.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-92.html similarity index 85% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-92.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-92.html index 65a2fec..c1c169e 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-92.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-92.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - \)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - \

    \\TensorFlow.Types
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - \

    \\TensorFlow.Types
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-95.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-95.html similarity index 87% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-95.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-95.html index ad632ff..97a93da 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-95.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-95.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - _)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-A.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-A.html similarity index 56% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-A.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-A.html index 2ea6cb4..b35cdaa 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-A.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-A.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - A)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/doc-index-All.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-All.html new file mode 100644 index 0000000..4c25c7a --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-All.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.2: TensorFlow bindings. (Index)

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index

    /:/TensorFlow.Types
    /=TensorFlow.Types, TensorFlow.Core
    :/TensorFlow.Types
    addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
    addInitializerTensorFlow.Build, TensorFlow.Core
    addNewOpTensorFlow.Build
    addSummaryTensorFlow.Tensor
    AllTensorTypesTensorFlow.Types
    asGraphDefTensorFlow.Build, TensorFlow.Core
    asyncProdNodesTensorFlow.Session, TensorFlow.Core
    AttributeTensorFlow.Types
    attrLensTensorFlow.Types
    BuildTensorFlow.Build, TensorFlow.Core
    buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
    BuildInputsTensorFlow.BuildOp
    buildInputsTensorFlow.BuildOp
    buildOpTensorFlow.BuildOp
    BuildResultTensorFlow.BuildOp
    buildResultTensorFlow.BuildOp
    BuildTTensorFlow.Build, TensorFlow.Core
    collectAllSummariesTensorFlow.Tensor
    colocateWithTensorFlow.Tensor, TensorFlow.Core
    ControlNode 
    1 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Output, TensorFlow.Build
    DataTypeTensorFlow.Types
    decodeTensorDataTensorFlow.Types, TensorFlow.Core
    DeleteTensorFlow.Types
    Device 
    1 (Type/Class)TensorFlow.Output, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Output, TensorFlow.Core
    deviceNameTensorFlow.Output, TensorFlow.Core
    DT_BFLOAT16TensorFlow.Types
    DT_BFLOAT16_REFTensorFlow.Types
    DT_BOOLTensorFlow.Types
    DT_BOOL_REFTensorFlow.Types
    DT_COMPLEX128TensorFlow.Types
    DT_COMPLEX128_REFTensorFlow.Types
    DT_COMPLEX64TensorFlow.Types
    DT_COMPLEX64_REFTensorFlow.Types
    DT_DOUBLETensorFlow.Types
    DT_DOUBLE_REFTensorFlow.Types
    DT_FLOATTensorFlow.Types
    DT_FLOAT_REFTensorFlow.Types
    DT_HALFTensorFlow.Types
    DT_HALF_REFTensorFlow.Types
    DT_INT16TensorFlow.Types
    DT_INT16_REFTensorFlow.Types
    DT_INT32TensorFlow.Types
    DT_INT32_REFTensorFlow.Types
    DT_INT64TensorFlow.Types
    DT_INT64_REFTensorFlow.Types
    DT_INT8TensorFlow.Types
    DT_INT8_REFTensorFlow.Types
    DT_INVALIDTensorFlow.Types
    DT_QINT16TensorFlow.Types
    DT_QINT16_REFTensorFlow.Types
    DT_QINT32TensorFlow.Types
    DT_QINT32_REFTensorFlow.Types
    DT_QINT8TensorFlow.Types
    DT_QINT8_REFTensorFlow.Types
    DT_QUINT16TensorFlow.Types
    DT_QUINT16_REFTensorFlow.Types
    DT_QUINT8TensorFlow.Types
    DT_QUINT8_REFTensorFlow.Types
    DT_RESOURCETensorFlow.Types
    DT_RESOURCE_REFTensorFlow.Types
    DT_STRINGTensorFlow.Types
    DT_STRING_REFTensorFlow.Types
    DT_UINT16TensorFlow.Types
    DT_UINT16_REFTensorFlow.Types
    DT_UINT8TensorFlow.Types
    DT_UINT8_REFTensorFlow.Types
    encodeOutputTensorFlow.Build
    encodeTensorDataTensorFlow.Types, TensorFlow.Core
    eqLengthGuardTensorFlow.BuildOp
    evalBuildTTensorFlow.Build
    ExcludedCaseTensorFlow.Types
    ExplicitNameTensorFlow.Output
    explicitNameTensorFlow.Build
    exprTensorFlow.Tensor, TensorFlow.Core
    extendTensorFlow.Session
    extendGraphTensorFlow.Internal.FFI
    Feed 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    feedTensorFlow.Tensor, TensorFlow.Core
    Fetch 
    1 (Type/Class)TensorFlow.Nodes
    2 (Data Constructor)TensorFlow.Nodes
    FetchableTensorFlow.Nodes, TensorFlow.Core
    fetchesTensorFlow.Nodes
    fetchRestoreTensorFlow.Nodes
    fetchTensorVectorTensorFlow.Nodes
    flushInitializersTensorFlow.Build
    flushNodeBufferTensorFlow.Build
    fromTensorTypeListTensorFlow.Types
    fromTensorTypesTensorFlow.Types
    getAllOpListTensorFlow.Internal.FFI
    getFetchTensorFlow.Nodes
    getNodesTensorFlow.Nodes
    getOrAddOpTensorFlow.Build
    getVarIntTensorFlow.Internal.VarInt
    GraphStateTensorFlow.Build
    groupTensorFlow.ControlFlow, TensorFlow.Core
    hoistBuildTTensorFlow.Build
    ImplicitNameTensorFlow.Output
    implicitNameTensorFlow.Build
    ListTensorFlow.Types
    ListOfTensorFlow.Types
    lookupNodeTensorFlow.Build
    MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
    NilTensorFlow.Types
    NodeName 
    1 (Type/Class)TensorFlow.Output
    2 (Data Constructor)TensorFlow.Output
    NodesTensorFlow.Nodes, TensorFlow.Core
    nodesUnionTensorFlow.Nodes
    NoneOfTensorFlow.Types
    noOpTensorFlow.ControlFlow, TensorFlow.Core
    OneOfTensorFlow.Types, TensorFlow.Core
    OneOfsTensorFlow.Types
    opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
    opControlInputsTensorFlow.Output, TensorFlow.Build
    OpDef 
    1 (Type/Class)TensorFlow.Output
    2 (Data Constructor)TensorFlow.Output
    opDefTensorFlow.Build
    opDefWithNameTensorFlow.Build
    opInputsTensorFlow.Output, TensorFlow.Build
    opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
    OpParamsTensorFlow.BuildOp
    OptionsTensorFlow.Session, TensorFlow.Core
    OpType 
    1 (Type/Class)TensorFlow.Output
    2 (Data Constructor)TensorFlow.Output
    opTypeTensorFlow.Output, TensorFlow.Build
    Output 
    1 (Type/Class)TensorFlow.Output
    2 (Data Constructor)TensorFlow.Output
    outputTensorFlow.Output
    outputIndexTensorFlow.Output
    OutputIx 
    1 (Type/Class)TensorFlow.Output
    2 (Data Constructor)TensorFlow.Output
    outputNodeNameTensorFlow.Output
    PendingNodeNameTensorFlow.Output
    protoShapeTensorFlow.Types
    pureOpTensorFlow.BuildOp
    PureResultTensorFlow.BuildOp
    pureResultTensorFlow.BuildOp
    putVarIntTensorFlow.Internal.VarInt
    Ref 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    renderTensorFlow.Tensor, TensorFlow.Core
    RenderedTensorFlow.Tensor
    renderedNodeDefsTensorFlow.Build
    renderedOutputTensorFlow.Tensor
    renderValueTensorFlow.Tensor
    ResourceHandleTensorFlow.Types, TensorFlow.Core
    run 
    1 (Function)TensorFlow.Internal.FFI
    2 (Function)TensorFlow.Session, TensorFlow.Core
    runBuildTTensorFlow.Build
    runRefTensorFlow.Tensor
    runSessionTensorFlow.Session, TensorFlow.Core
    runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
    runValueTensorFlow.Tensor
    runWithFeedsTensorFlow.Session, TensorFlow.Core
    runWithFeeds_TensorFlow.Session, TensorFlow.Core
    run_TensorFlow.Session, TensorFlow.Core
    Scalar 
    1 (Type/Class)TensorFlow.Types, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Types, TensorFlow.Core
    Session 
    1 (Type/Class)TensorFlow.Internal.FFI
    2 (Type/Class)TensorFlow.Session, TensorFlow.Core
    sessionConfigTensorFlow.Session, TensorFlow.Core
    SessionTTensorFlow.Session
    sessionTargetTensorFlow.Session, TensorFlow.Core
    sessionTracerTensorFlow.Session, TensorFlow.Core
    setSessionConfigTensorFlow.Internal.FFI
    setSessionTargetTensorFlow.Internal.FFI
    Shape 
    1 (Type/Class)TensorFlow.Types, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Types, TensorFlow.Core
    summariesTensorFlow.Build
    SummaryTensorTensorFlow.Tensor
    Tensor 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    TensorData 
    1 (Type/Class)TensorFlow.Internal.FFI
    2 (Data Constructor)TensorFlow.Internal.FFI
    3 (Type/Class)TensorFlow.Types, TensorFlow.Core
    4 (Data Constructor)TensorFlow.Types
    tensorDataBytesTensorFlow.Internal.FFI
    tensorDataDimensionsTensorFlow.Internal.FFI
    TensorDataTypeTensorFlow.Types, TensorFlow.Core
    tensorDataTypeTensorFlow.Internal.FFI
    TensorFlowException 
    1 (Type/Class)TensorFlow.Internal.FFI
    2 (Data Constructor)TensorFlow.Internal.FFI
    tensorFromNameTensorFlow.Tensor, TensorFlow.Core
    TensorKindTensorFlow.Tensor
    TensorListTensorFlow.Tensor
    tensorListOutputsTensorFlow.Tensor
    tensorNodeNameTensorFlow.Tensor
    tensorOutputTensorFlow.Tensor
    tensorRefFromNameTensorFlow.Tensor
    tensorRefTypeTensorFlow.Types
    TensorTypeTensorFlow.Types, TensorFlow.Core
    tensorTypeTensorFlow.Types
    TensorTypeListTensorFlow.Types
    TensorTypeProxy 
    1 (Type/Class)TensorFlow.Types
    2 (Data Constructor)TensorFlow.Types
    TensorTypesTensorFlow.Types
    tensorTypesTensorFlow.Types
    tensorValTensorFlow.Types
    tensorValueFromNameTensorFlow.Tensor
    toBuildTensorFlow.Tensor
    ToTensorTensorFlow.Tensor
    toTensorTensorFlow.Tensor
    TypeErrorTensorFlow.Types
    unControlNodeTensorFlow.Output, TensorFlow.Build
    UniqueTensorFlow.Build
    unNodeNameTensorFlow.Output
    unOpTypeTensorFlow.Output
    unOutputIxTensorFlow.Output
    unScalarTensorFlow.Types, TensorFlow.Core
    unTensorDataTensorFlow.Types
    useProtoAsVoidPtrLenTensorFlow.Internal.FFI
    Value 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    valueTensorFlow.Tensor, TensorFlow.Core
    withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
    withDeviceTensorFlow.Build, TensorFlow.Core
    withNameScopeTensorFlow.Build, TensorFlow.Core
    withNodeDependenciesTensorFlow.Build
    withSessionTensorFlow.Internal.FFI
    withStateLensTensorFlow.Build
    \\TensorFlow.Types
    _opAttrsTensorFlow.Output
    _opControlInputsTensorFlow.Output
    _opInputsTensorFlow.Output
    _opNameTensorFlow.Output
    _opTypeTensorFlow.Output
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-B.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-B.html similarity index 90% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-B.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-B.html index 1144633..aefc992 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-B.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-B.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - B)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-C.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-C.html similarity index 89% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-C.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-C.html index 1104328..6200387 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-C.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-C.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - C)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - C

    collectAllSummariesTensorFlow.Tensor
    colocateWithTensorFlow.Tensor, TensorFlow.Core
    ControlNode 
    1 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Output, TensorFlow.Build
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - C

    collectAllSummariesTensorFlow.Tensor
    colocateWithTensorFlow.Tensor, TensorFlow.Core
    ControlNode 
    1 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Output, TensorFlow.Build
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-D.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-D.html similarity index 96% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-D.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-D.html index 635d049..be4e779 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-D.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-D.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - D)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - D

    DataTypeTensorFlow.Types
    decodeTensorDataTensorFlow.Types, TensorFlow.Core
    DeleteTensorFlow.Types
    Device 
    1 (Type/Class)TensorFlow.Output, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Output, TensorFlow.Core
    deviceNameTensorFlow.Output, TensorFlow.Core
    DT_BFLOAT16TensorFlow.Types
    DT_BFLOAT16_REFTensorFlow.Types
    DT_BOOLTensorFlow.Types
    DT_BOOL_REFTensorFlow.Types
    DT_COMPLEX128TensorFlow.Types
    DT_COMPLEX128_REFTensorFlow.Types
    DT_COMPLEX64TensorFlow.Types
    DT_COMPLEX64_REFTensorFlow.Types
    DT_DOUBLETensorFlow.Types
    DT_DOUBLE_REFTensorFlow.Types
    DT_FLOATTensorFlow.Types
    DT_FLOAT_REFTensorFlow.Types
    DT_HALFTensorFlow.Types
    DT_HALF_REFTensorFlow.Types
    DT_INT16TensorFlow.Types
    DT_INT16_REFTensorFlow.Types
    DT_INT32TensorFlow.Types
    DT_INT32_REFTensorFlow.Types
    DT_INT64TensorFlow.Types
    DT_INT64_REFTensorFlow.Types
    DT_INT8TensorFlow.Types
    DT_INT8_REFTensorFlow.Types
    DT_INVALIDTensorFlow.Types
    DT_QINT16TensorFlow.Types
    DT_QINT16_REFTensorFlow.Types
    DT_QINT32TensorFlow.Types
    DT_QINT32_REFTensorFlow.Types
    DT_QINT8TensorFlow.Types
    DT_QINT8_REFTensorFlow.Types
    DT_QUINT16TensorFlow.Types
    DT_QUINT16_REFTensorFlow.Types
    DT_QUINT8TensorFlow.Types
    DT_QUINT8_REFTensorFlow.Types
    DT_RESOURCETensorFlow.Types
    DT_RESOURCE_REFTensorFlow.Types
    DT_STRINGTensorFlow.Types
    DT_STRING_REFTensorFlow.Types
    DT_UINT16TensorFlow.Types
    DT_UINT16_REFTensorFlow.Types
    DT_UINT8TensorFlow.Types
    DT_UINT8_REFTensorFlow.Types
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - D

    DataTypeTensorFlow.Types
    decodeTensorDataTensorFlow.Types, TensorFlow.Core
    DeleteTensorFlow.Types
    Device 
    1 (Type/Class)TensorFlow.Output, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Output, TensorFlow.Core
    deviceNameTensorFlow.Output, TensorFlow.Core
    DT_BFLOAT16TensorFlow.Types
    DT_BFLOAT16_REFTensorFlow.Types
    DT_BOOLTensorFlow.Types
    DT_BOOL_REFTensorFlow.Types
    DT_COMPLEX128TensorFlow.Types
    DT_COMPLEX128_REFTensorFlow.Types
    DT_COMPLEX64TensorFlow.Types
    DT_COMPLEX64_REFTensorFlow.Types
    DT_DOUBLETensorFlow.Types
    DT_DOUBLE_REFTensorFlow.Types
    DT_FLOATTensorFlow.Types
    DT_FLOAT_REFTensorFlow.Types
    DT_HALFTensorFlow.Types
    DT_HALF_REFTensorFlow.Types
    DT_INT16TensorFlow.Types
    DT_INT16_REFTensorFlow.Types
    DT_INT32TensorFlow.Types
    DT_INT32_REFTensorFlow.Types
    DT_INT64TensorFlow.Types
    DT_INT64_REFTensorFlow.Types
    DT_INT8TensorFlow.Types
    DT_INT8_REFTensorFlow.Types
    DT_INVALIDTensorFlow.Types
    DT_QINT16TensorFlow.Types
    DT_QINT16_REFTensorFlow.Types
    DT_QINT32TensorFlow.Types
    DT_QINT32_REFTensorFlow.Types
    DT_QINT8TensorFlow.Types
    DT_QINT8_REFTensorFlow.Types
    DT_QUINT16TensorFlow.Types
    DT_QUINT16_REFTensorFlow.Types
    DT_QUINT8TensorFlow.Types
    DT_QUINT8_REFTensorFlow.Types
    DT_RESOURCETensorFlow.Types
    DT_RESOURCE_REFTensorFlow.Types
    DT_STRINGTensorFlow.Types
    DT_STRING_REFTensorFlow.Types
    DT_UINT16TensorFlow.Types
    DT_UINT16_REFTensorFlow.Types
    DT_UINT8TensorFlow.Types
    DT_UINT8_REFTensorFlow.Types
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-E.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-E.html similarity index 90% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-E.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-E.html index 8f93c0a..546d3f6 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-E.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-E.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - E)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-F.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-F.html similarity index 91% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-F.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-F.html index b68ccf2..98f2b80 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-F.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-F.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - F)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - F

    Feed 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    feedTensorFlow.Tensor, TensorFlow.Core
    Fetch 
    1 (Type/Class)TensorFlow.Nodes
    2 (Data Constructor)TensorFlow.Nodes
    FetchableTensorFlow.Nodes, TensorFlow.Core
    fetchesTensorFlow.Nodes
    fetchRestoreTensorFlow.Nodes
    fetchTensorVectorTensorFlow.Nodes
    flushInitializersTensorFlow.Build
    flushNodeBufferTensorFlow.Build
    fromTensorTypeListTensorFlow.Types
    fromTensorTypesTensorFlow.Types
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - F

    Feed 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    feedTensorFlow.Tensor, TensorFlow.Core
    Fetch 
    1 (Type/Class)TensorFlow.Nodes
    2 (Data Constructor)TensorFlow.Nodes
    FetchableTensorFlow.Nodes, TensorFlow.Core
    fetchesTensorFlow.Nodes
    fetchRestoreTensorFlow.Nodes
    fetchTensorVectorTensorFlow.Nodes
    flushInitializersTensorFlow.Build
    flushNodeBufferTensorFlow.Build
    fromTensorTypeListTensorFlow.Types
    fromTensorTypesTensorFlow.Types
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-G.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-G.html similarity index 88% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-G.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-G.html index 7754769..9a0d579 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-G.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-G.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - G)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-H.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-H.html similarity index 85% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-H.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-H.html index 5cb5a53..a329401 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-H.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-H.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - H)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - H

    hoistBuildTTensorFlow.Build
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - H

    hoistBuildTTensorFlow.Build
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-I.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-I.html similarity index 85% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-I.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-I.html index 10d6dca..efc01e5 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-I.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-I.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - I)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - I

    ImplicitNameTensorFlow.Output
    implicitNameTensorFlow.Build
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - I

    ImplicitNameTensorFlow.Output
    implicitNameTensorFlow.Build
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-L.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-L.html similarity index 87% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-L.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-L.html index 6432c93..8728746 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-L.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-L.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - L)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-M.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-M.html similarity index 85% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-M.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-M.html index 062d654..8b6b839 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-M.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-M.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - M)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-N.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-N.html similarity index 89% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-N.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-N.html index ab37e7d..b9c7cd5 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-N.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-N.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - N)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-O.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-O.html similarity index 94% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-O.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-O.html index 809ce24..4992d02 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-O.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-O.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - O)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-P.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-P.html similarity index 89% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-P.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-P.html index 3781991..641f4d0 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-P.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-P.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - P)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/doc-index-R.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-R.html new file mode 100644 index 0000000..0878248 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-R.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.2: TensorFlow bindings. (Index - R)

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-S.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-S.html similarity index 58% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-S.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-S.html index 544afa5..f15eef7 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-S.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-S.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - S)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-T.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-T.html similarity index 88% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-T.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-T.html index a7f6b35..44725ef 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-T.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-T.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - T)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - T

    Tensor 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    TensorData 
    1 (Type/Class)TensorFlow.Internal.FFI
    2 (Data Constructor)TensorFlow.Internal.FFI
    3 (Type/Class)TensorFlow.Types, TensorFlow.Core
    4 (Data Constructor)TensorFlow.Types
    tensorDataBytesTensorFlow.Internal.FFI
    tensorDataDimensionsTensorFlow.Internal.FFI
    TensorDataTypeTensorFlow.Types, TensorFlow.Core
    tensorDataTypeTensorFlow.Internal.FFI
    TensorFlowException 
    1 (Type/Class)TensorFlow.Internal.FFI
    2 (Data Constructor)TensorFlow.Internal.FFI
    tensorFromNameTensorFlow.Tensor, TensorFlow.Core
    TensorKindTensorFlow.Tensor
    TensorListTensorFlow.Tensor
    tensorListOutputsTensorFlow.Tensor
    tensorNodeNameTensorFlow.Tensor
    tensorOutputTensorFlow.Tensor
    tensorRefFromNameTensorFlow.Tensor
    tensorRefTypeTensorFlow.Types
    TensorTypeTensorFlow.Types, TensorFlow.Core
    tensorTypeTensorFlow.Types
    TensorTypeListTensorFlow.Types
    TensorTypeProxy 
    1 (Type/Class)TensorFlow.Types
    2 (Data Constructor)TensorFlow.Types
    TensorTypesTensorFlow.Types
    tensorTypesTensorFlow.Types
    tensorValTensorFlow.Types
    tensorValueFromNameTensorFlow.Tensor
    toBuildTensorFlow.Tensor
    TypeErrorTensorFlow.Types
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - T

    Tensor 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    TensorData 
    1 (Type/Class)TensorFlow.Internal.FFI
    2 (Data Constructor)TensorFlow.Internal.FFI
    3 (Type/Class)TensorFlow.Types, TensorFlow.Core
    4 (Data Constructor)TensorFlow.Types
    tensorDataBytesTensorFlow.Internal.FFI
    tensorDataDimensionsTensorFlow.Internal.FFI
    TensorDataTypeTensorFlow.Types, TensorFlow.Core
    tensorDataTypeTensorFlow.Internal.FFI
    TensorFlowException 
    1 (Type/Class)TensorFlow.Internal.FFI
    2 (Data Constructor)TensorFlow.Internal.FFI
    tensorFromNameTensorFlow.Tensor, TensorFlow.Core
    TensorKindTensorFlow.Tensor
    TensorListTensorFlow.Tensor
    tensorListOutputsTensorFlow.Tensor
    tensorNodeNameTensorFlow.Tensor
    tensorOutputTensorFlow.Tensor
    tensorRefFromNameTensorFlow.Tensor
    tensorRefTypeTensorFlow.Types
    TensorTypeTensorFlow.Types, TensorFlow.Core
    tensorTypeTensorFlow.Types
    TensorTypeListTensorFlow.Types
    TensorTypeProxy 
    1 (Type/Class)TensorFlow.Types
    2 (Data Constructor)TensorFlow.Types
    TensorTypesTensorFlow.Types
    tensorTypesTensorFlow.Types
    tensorValTensorFlow.Types
    tensorValueFromNameTensorFlow.Tensor
    toBuildTensorFlow.Tensor
    ToTensorTensorFlow.Tensor
    toTensorTensorFlow.Tensor
    TypeErrorTensorFlow.Types
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-U.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-U.html similarity index 90% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-U.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-U.html index b373d24..6ba3b7a 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-U.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-U.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - U)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-V.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-V.html similarity index 88% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-V.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-V.html index 7177415..bf17e5d 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-V.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-V.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - V)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - V

    Value 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    valueTensorFlow.Tensor, TensorFlow.Core
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - V

    Value 
    1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
    2 (Data Constructor)TensorFlow.Tensor
    valueTensorFlow.Tensor, TensorFlow.Core
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-W.html b/docs/haddock/tensorflow-0.1.0.2/doc-index-W.html similarity index 90% rename from docs/haddock/tensorflow-0.1.0.0/doc-index-W.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index-W.html index f4ff645..bf8c8d6 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index-W.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index-W.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index - W)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index - W

    withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
    withDeviceTensorFlow.Build, TensorFlow.Core
    withNameScopeTensorFlow.Build, TensorFlow.Core
    withNodeDependenciesTensorFlow.Build
    withSessionTensorFlow.Internal.FFI
    withStateLensTensorFlow.Build
    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    Index - W

    withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
    withDeviceTensorFlow.Build, TensorFlow.Core
    withNameScopeTensorFlow.Build, TensorFlow.Core
    withNodeDependenciesTensorFlow.Build
    withSessionTensorFlow.Internal.FFI
    withStateLensTensorFlow.Build
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-0.1.0.2/doc-index.html similarity index 81% rename from docs/haddock/tensorflow-0.1.0.0/doc-index.html rename to docs/haddock/tensorflow-0.1.0.2/doc-index.html index b50be58..dffcabb 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-0.1.0.2/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-0.1.0.0: TensorFlow bindings. (Index)

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-0.1.0.2: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-0.1.0.2/haddock-util.js similarity index 91% rename from docs/haddock/tensorflow-queue-0.1.0.0/haddock-util.js rename to docs/haddock/tensorflow-0.1.0.2/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-queue-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-0.1.0.2/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? -1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. 
parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-0.1.0.2/hslogo-16.png similarity index 100% rename from docs/haddock/tensorflow-0.1.0.0/hslogo-16.png rename to docs/haddock/tensorflow-0.1.0.2/hslogo-16.png diff --git a/docs/haddock/tensorflow-0.1.0.2/index.html b/docs/haddock/tensorflow-0.1.0.2/index.html new file mode 100644 index 0000000..7e2862b --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/index.html @@ -0,0 +1,9 @@ +tensorflow-0.1.0.2: TensorFlow bindings.

    tensorflow-0.1.0.2: TensorFlow bindings.

    tensorflow-0.1.0.2: TensorFlow bindings.

    This library provides an interface to the TensorFlow +bindings. TensorFlow.Core contains the base API for +building and running computational graphs. Other packages +such as tensorflow-ops contain bindings to the actual +computational kernels.

    For more documentation and examples, see +https://github.com/tensorflow/haskell#readme

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Build.html similarity index 96% rename from docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html rename to docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Build.html index cc90c09..b1e82cf 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Build.html @@ -1,4 +1,4 @@ -TensorFlow.Build

    TensorFlow.Build

    Graph node types

    data Unique

    Ops

    The Build monad

    data BuildT m a

    type Build

    class MonadBuild m

    Creating and looking up Ops

    Modifying all nodes in a Build action

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-BuildOp.html similarity index 88% rename from docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html rename to docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-BuildOp.html index 8f06a64..439b7c0 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-BuildOp.html @@ -1,4 +1,4 @@ -TensorFlow.BuildOp

    TensorFlow.BuildOp

    class BuildResult a

    class PureResult a

    class BuildInputs a

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-ControlFlow.html similarity index 83% rename from docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html rename to docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-ControlFlow.html index 5b06fa8..9f7e3a4 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-ControlFlow.html @@ -1,4 +1,4 @@ -TensorFlow.ControlFlow

    TensorFlow.ControlFlow

    Dependencies

    Operations

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Core.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Core.html new file mode 100644 index 0000000..feefc0f --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Core.html @@ -0,0 +1,4 @@ +TensorFlow.Core

    TensorFlow.Core

    Session

    type Session

    data Options

    Building graphs

    class MonadBuild m

    Running graphs

    class Fetchable t a

    class Nodes t

    data Feed

    Async

    Build

    type Build

    data BuildT m a

    Tensor

    data Tensor v a

    data Value a

    data Ref a

    Element types

    class TensorType a

    data TensorData a

    class TensorDataType s a

    data Scalar a

    data Shape

    type OneOf ts a

    type family a /= b :: Constraint where ...

    Op combinators

    data Device

    Dependencies

    Misc

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-FFI.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Internal-FFI.html similarity index 91% rename from docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-FFI.html rename to docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Internal-FFI.html index bd4a296..f6acbc1 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-FFI.html +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Internal-FFI.html @@ -1,4 +1,4 @@ -TensorFlow.Internal.FFI

    TensorFlow.Internal.FFI

    data Session

    Internal helper.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-VarInt.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Internal-VarInt.html similarity index 80% rename from docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-VarInt.html rename to docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Internal-VarInt.html index cd36a75..ae86ad7 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-VarInt.html +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Internal-VarInt.html @@ -1,4 +1,4 @@ -TensorFlow.Internal.VarInt

    TensorFlow.Internal.VarInt

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Nodes.html similarity index 86% rename from docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html rename to docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Nodes.html index e5ea2c1..1f8e8c3 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Nodes.html @@ -1,4 +1,4 @@ -TensorFlow.Nodes

    TensorFlow.Nodes

    class Nodes t

    class Fetchable t a

    data Fetch a

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Output.html similarity index 88% rename from docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html rename to docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Output.html index 27d11f4..aa8d836 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Output.html @@ -1,4 +1,4 @@ -TensorFlow.Output

    TensorFlow.Output

    data Device

    Ops

    data OpDef

    data OpType

    data Output

    \ No newline at end of file +

    TensorFlow.Output

    data Device

    Ops

    data OpDef

    data OpType

    data Output

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Session.html new file mode 100644 index 0000000..a0d39ff --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Session.html @@ -0,0 +1,4 @@ +TensorFlow.Session

    TensorFlow.Session

    type Session

    data SessionT m a

    data Options

    class MonadBuild m

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Tensor.html new file mode 100644 index 0000000..3a172fa --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Tensor.html @@ -0,0 +1,4 @@ +TensorFlow.Tensor

    TensorFlow.Tensor

    data Tensor v a

    data Value a

    data Ref a

    data Feed

    class Rendered t

    type TensorList v

    class TensorKind v

    class ToTensor t

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Types.html new file mode 100644 index 0000000..9da76cc --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/mini_TensorFlow-Types.html @@ -0,0 +1,4 @@ +TensorFlow.Types

    TensorFlow.Types

    class TensorType a

    data TensorData a

    class TensorDataType s a

    data Scalar a

    data Shape

    class Attribute a

    Lists

    data ListOf f as

    type List

    class TensorTypes ts

    Type constraints

    type OneOf ts a

    type family a /= b :: Constraint where ...

    type OneOfs ts as

    Implementation of constraints

    data TypeError a

    type family NoneOf ts a :: Constraint where ...

    type family as \\ bs where ...

    type family Delete a as where ...

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/minus.gif b/docs/haddock/tensorflow-0.1.0.2/minus.gif similarity index 100% rename from docs/haddock/tensorflow-0.1.0.0/minus.gif rename to docs/haddock/tensorflow-0.1.0.2/minus.gif diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/ocean.css b/docs/haddock/tensorflow-0.1.0.2/ocean.css similarity index 92% rename from docs/haddock/tensorflow-queue-0.1.0.0/ocean.css rename to docs/haddock/tensorflow-0.1.0.2/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-queue-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-0.1.0.2/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid 
#919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ -424,30 +435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-0.1.0.0/plus.gif b/docs/haddock/tensorflow-0.1.0.2/plus.gif similarity index 100% rename from docs/haddock/tensorflow-0.1.0.0/plus.gif rename to docs/haddock/tensorflow-0.1.0.2/plus.gif diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Build.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Build.html new file mode 100644 index 0000000..73ddcb1 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Build.html @@ -0,0 +1,339 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE GeneralizedNewtypeDeriving #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE LambdaCase #-}
    +{-# LANGUAGE FunctionalDependencies #-}
    +{-# LANGUAGE MultiParamTypeClasses #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE Rank2Types #-}
    +{-# LANGUAGE TypeFamilies #-}
    +module TensorFlow.Build
    +    ( -- * Graph node types
    +      ControlNode(..)
    +    , Unique
    +    -- * Ops
    +    , explicitName
    +    , implicitName
    +    , opDef
    +    , opDefWithName
    +    , opName
    +    , opType
    +    , opAttr
    +    , opInputs
    +    , opControlInputs
    +    -- * The Build monad
    +    , GraphState
    +    , renderedNodeDefs
    +    , BuildT
    +    , Build
    +    , MonadBuild(..)
    +    , addInitializer
    +    , hoistBuildT
    +    , evalBuildT
    +    , runBuildT
    +    , asGraphDef
    +    , addGraphDef
    +    , flushInitializers
    +    , flushNodeBuffer
    +    , summaries
    +    -- * Creating and looking up Ops
    +    , getOrAddOp
    +    , addNewOp
    +    , encodeOutput
    +    , lookupNode
    +    -- * Modifying all nodes in a Build action
    +    , withStateLens
    +    , withDevice
    +    , withNameScope
    +    , withNodeDependencies
    +    ) where
    +
    +import Control.Monad.Catch (MonadThrow, MonadCatch, MonadMask)
    +import Control.Monad.Fix (MonadFix(..))
    +import Control.Monad.IO.Class (MonadIO(..))
    +import Control.Monad.Trans.Class (MonadTrans(..))
    +import Control.Monad.Trans.State.Strict(StateT(..), mapStateT, evalStateT)
    +import Data.Default (def)
    +import Data.Functor.Identity (Identity(..))
    +import qualified Data.Map.Strict as Map
    +import Data.Monoid ((<>))
    +import qualified Data.Set as Set
    +import Data.Set (Set)
    +import Data.String (IsString(..))
    +import Data.Text (Text)
    +import qualified Data.Text as Text
    +import Lens.Family2 (Lens', (.~), (^.), (&))
    +import Lens.Family2.State.Strict (MonadState, use, uses, (.=), (<>=), (%=))
    +import Lens.Family2.Unchecked (lens)
    +import Proto.Tensorflow.Core.Framework.Graph
    +    ( GraphDef
    +    , node
    +    )
    +import Proto.Tensorflow.Core.Framework.NodeDef
    +    ( NodeDef
    +    , attr
    +    , input
    +    , device
    +    , name
    +    , op
    +    )
    +
    +import TensorFlow.Output
    +
    +newtype Unique = Unique Int
    +    deriving (Eq, Ord, Enum)
    +
    +--------------
    +
    +implicitName :: PendingNodeName
    +implicitName = ImplicitName
    +
    +explicitName :: Text -> PendingNodeName
    +explicitName = ExplicitName
    +
    +newtype Scope = Scope {unScope :: Text}
    +    deriving (Eq, Ord, IsString)
    +
    +instance Show Scope where
    +    show = show . unScope
    +
    +opDef :: OpType -> OpDef
    +opDef = opDefWithName ImplicitName
    +
    +opDefWithName :: PendingNodeName -> OpType -> OpDef
    +opDefWithName n t = OpDef
    +    { _opName = n
    +    , _opType = t
    +    , _opAttrs = Map.empty
    +    , _opInputs = []
    +    , _opControlInputs = []
    +    }
    +
    +data GraphState = GraphState
    +    { _renderedNodes :: !(Map.Map PendingNode NodeDef)
    +        -- ^ Nodes which have been rendered.  Keeps track of the unique ID we
    +        -- assign each implicitly-named node.  Also prevents us from adding the
    +        -- same node (implicit or explicit) more than once to the nodeBuffer.
    +    , _renderedNodeDefs :: !(Map.Map NodeName NodeDef)
    +        -- ^ The NodeDefs of nodes which have been rendered. Used by the
    +        -- Gradient module to inspect the node graph.
    +    , _nodeBuffer :: [NodeDef]
    +        -- ^ A list of nodes that should be passed to TensorFlow during
    +        -- the next call to Session.extend (TF_ExtendGraph).
    +    , _nextUnique :: !Unique
    +        -- ^ Unique ID for the next node
    +    -- TODO(judahjacobson): watch for clashes between auto and user names.
    +    , _defaultDevice :: !(Maybe Device)
    +    , _currentScope :: [Scope]
    +    , _defaultControlInputs :: !(Set NodeName)
    +    , _initializationNodes  :: [NodeName]
    +      -- ^ The nodes to run next time a TF.run is issued, typically
    +      -- variable initializers.
    +    , _summaries :: [Output]
    +      -- ^ The tensors for summary (ByteString type)
    +    }
    +
    +-- | A node definition without its final name.  Used as a key in the
    +-- "renderedNodes" map.
    +-- The NodeDef contained inside has an empty "name" field.
    +data PendingNode = PendingNode [Scope] !PendingNodeName !NodeDef
    +    deriving (Eq, Ord)
    +
    +-- Returns an _incomplete_ NodeDef. The name is fixed by addNewOpFromPending.
    +pendingNodeDef :: PendingNode -> NodeDef
    +pendingNodeDef (PendingNode _ _ n) = n
    +
    +initGraphState :: GraphState
    +initGraphState =
    +    GraphState Map.empty Map.empty [] (Unique 0) Nothing [] Set.empty [] []
    +
    +renderedNodes :: Lens' GraphState (Map.Map PendingNode NodeDef)
    +renderedNodes = lens _renderedNodes (\g x -> g { _renderedNodes = x })
    +
    +renderedNodeDefs :: Lens' GraphState (Map.Map NodeName NodeDef)
    +renderedNodeDefs = lens _renderedNodeDefs (\g x -> g { _renderedNodeDefs = x })
    +
    +nodeBuffer :: Lens' GraphState [NodeDef]
    +nodeBuffer = lens _nodeBuffer (\g x -> g { _nodeBuffer = x })
    +
    +nextUnique :: Lens' GraphState Unique
    +nextUnique = lens _nextUnique (\g x -> g { _nextUnique = x })
    +
    +defaultDevice :: Lens' GraphState (Maybe Device)
    +defaultDevice = lens _defaultDevice (\g x -> g { _defaultDevice = x })
    +
    +currentScope :: Lens' GraphState [Scope]
    +currentScope = lens _currentScope (\g x -> g { _currentScope = x })
    +
    +defaultControlInputs :: Lens' GraphState (Set NodeName)
    +defaultControlInputs = lens _defaultControlInputs
    +                          (\g x -> g { _defaultControlInputs = x })
    +
    +initializationNodes :: Lens' GraphState [NodeName]
    +initializationNodes = lens _initializationNodes (\g x -> g { _initializationNodes = x })
    +
    +summaries :: Lens' GraphState [Output]
    +summaries = lens _summaries (\g x -> g { _summaries = x })
    +
    +-- | An action for building nodes in a TensorFlow graph.
    +-- Used to manage build state internally as part of the @Session@ monad.
    +newtype BuildT m a = BuildT (StateT GraphState m a)
    +    deriving (Functor, Applicative, Monad, MonadIO, MonadTrans,
    +              MonadState GraphState, MonadThrow, MonadCatch, MonadMask,
    +              MonadFix)
    +
    +-- | An action for building nodes in a TensorFlow graph.
    +type Build = BuildT Identity
    +
    +-- | This is Control.Monad.Morph.hoist sans the dependency.
    +hoistBuildT :: (forall a . m a -> n a) -> BuildT m b -> BuildT n b
    +hoistBuildT f (BuildT m) = BuildT $ mapStateT f m
    +
    +runBuildT :: BuildT m a -> m (a, GraphState)
    +runBuildT (BuildT f) = runStateT f initGraphState
    +
    +evalBuildT :: Monad m => BuildT m a -> m a
    +evalBuildT (BuildT f) = evalStateT f initGraphState
    +
    +-- | Lift a 'Build' action into a monad, including any explicit op renderings.
    +class Monad m => MonadBuild m where
    +    build :: Build a -> m a
    +
    +instance Monad m => MonadBuild (BuildT m) where
    +    build = hoistBuildT $ return . runIdentity
    +
    +-- | Get all the NodeDefs that have accumulated so far, and clear that buffer.
    +flushNodeBuffer :: MonadBuild m => m [NodeDef]
    +flushNodeBuffer = build $ do
    +    ns <- use nodeBuffer
    +    nodeBuffer .= []
    +    return ns
    +
    +-- | Get all the initializers that have accumulated so far, and clear
    +-- that buffer.
    +flushInitializers :: Monad m => BuildT m [NodeName]
    +flushInitializers = do
    +    ns <- use initializationNodes
    +    initializationNodes .= []
    +    return ns
    +
    +-- | Registers the given node to be executed before the next
    +-- 'TensorFlow.Session.run'.
    +addInitializer :: MonadBuild m => ControlNode -> m ()
    +addInitializer (ControlNode i) = build $ initializationNodes %= (i:)
    +
    +-- | Produce a GraphDef proto representation of the nodes that are rendered in
    +-- the given 'Build' action.
    +asGraphDef :: Build a -> GraphDef
    +asGraphDef b = def & node .~ gs ^. nodeBuffer
    +  where
    +    gs = snd $ runIdentity $ runBuildT b
    +
    +-- TODO: check against existing nodes for conflicts?
    +addGraphDef :: MonadBuild m => GraphDef -> m ()
    +addGraphDef g = build $ nodeBuffer <>= g ^. node
    +
    +-- | Render the given op if it hasn't been rendered already, and return its
    +-- name.
    +getOrAddOp :: OpDef -> Build NodeName
    +getOrAddOp o = do
    +    pending <- getPendingNode o
    +    uses renderedNodes (Map.lookup pending) >>= \case
    +        Just n -> return $ NodeName $ n ^. name
    +        Nothing -> addNewOpFromPending pending
    +
    +lookupNode :: NodeName -> Build NodeDef
    +lookupNode n = uses renderedNodeDefs (Map.lookup n) >>= \case
    +    Just n' -> return n'
    +    Nothing -> error $ "lookupNode: unknown node name " ++ show n
    +
    +-- | Add a new node for a given 'OpDef'.  This is used for making "stateful" ops
    +-- which are not safe to dedup (e.g, "variable" and "assign").
    +addNewOp :: OpDef -> Build NodeName
    +addNewOp o = getPendingNode o >>= addNewOpFromPending
    +
    +addNewOpFromPending :: PendingNode -> Build NodeName
    +addNewOpFromPending pending = do
    +    nodeName <- renderPendingNode pending
    +    let nodeDef = pendingNodeDef pending & name .~ unNodeName nodeName
    +    nodeBuffer %= (nodeDef :)
    +    renderedNodes %= Map.insert pending nodeDef
    +    renderedNodeDefs %= Map.insert nodeName nodeDef
    +    return nodeName
    +
    +-- | Get the pending node corresponding to an OpDef, which may or may not have
    +-- been rendered before.  Implicitly renders all of this node's inputs.
    +getPendingNode :: OpDef -> Build PendingNode
    +getPendingNode o = do
    +    -- An empty string in the proto field means that no specific
    +    -- device is specified.
    +    dev <- maybe "" deviceName <$> use defaultDevice
    +    scope <- use currentScope
    +    controls <- use defaultControlInputs
    +    let inputs = map encodeOutput (o ^. opInputs)
    +    let controlInputs
    +            = map makeDep (o ^. opControlInputs ++ Set.toList controls)
    +    return $ PendingNode scope (o ^. opName)
    +            $ def & op .~ (unOpType (o ^. opType) :: Text)
    +                  & attr .~ _opAttrs o
    +                  & input .~ (inputs ++ controlInputs)
    +                  & device .~ dev
    +  where
    +    makeDep = ("^" <>) . unNodeName
    +
    +-- | Pick a name for a pending node.  If it has an explicit name, just use that;
    +-- if the name is implicit, assign a new unique name based on the op type.
    +renderPendingNode :: PendingNode -> Build NodeName
    +renderPendingNode (PendingNode scope pendingName nodeDef)
    +    = NodeName . (scopePrefix <>) <$> getName
    +  where
    +    scopePrefix = Text.concat $ fmap ((<> "/") . unScope) scope
    +    getName = case pendingName of
    +        ExplicitName n -> return n
    +        ImplicitName -> do
    +            u@(Unique k) <- use nextUnique
    +            nextUnique .= succ u
    +            return $ nodeDef ^. op <> "_" <> Text.pack (show k)
    +
    +-- | Turn an 'Output' into a string representation for the TensorFlow
    +-- foreign APIs.
    +encodeOutput :: Output -> Text
    +encodeOutput (Output (OutputIx 0) n) = unNodeName n
    +encodeOutput (Output (OutputIx i) n) = unNodeName n <> Text.pack (':' : show i)
    +
    +-- | Modify some part of the state, run an action, and restore the state
    +-- after that action is done.
    +withStateLens :: MonadBuild m => Lens' GraphState a -> (a -> a) -> m b -> m b
    +withStateLens accessor f act = do
    +    old <- build $ use accessor
    +    build $ accessor %= f
    +    result <- act
    +    build $ accessor .= old
    +    return result
    +
    +-- | Set a device for all nodes rendered in the given 'Build' action
    +-- (unless further overridden by another use of withDevice).
    +withDevice :: MonadBuild m => Maybe Device -> m a -> m a
    +withDevice d = withStateLens defaultDevice (const d)
    +
    +-- | Prepend a scope to all nodes rendered in the given 'Build' action.
    +withNameScope :: MonadBuild m => Text -> m a -> m a
    +withNameScope s = withStateLens currentScope (Scope s :)
    +
    +-- | Add control inputs to all nodes rendered in the given 'Build' action.
    +withNodeDependencies :: MonadBuild m => Set NodeName -> m a -> m a
    +withNodeDependencies nodes = withStateLens defaultControlInputs (<> nodes)
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.BuildOp.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.BuildOp.html new file mode 100644 index 0000000..a1cfc61 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.BuildOp.html @@ -0,0 +1,307 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE FlexibleContexts #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE GADTs #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TupleSections #-}
    +
    +module TensorFlow.BuildOp
    +    ( BuildResult(..)
    +    , buildOp
    +    , PureResult(..)
    +    , pureOp
    +    , eqLengthGuard
    +    , BuildInputs(..)
    +    , OpParams
    +    )
    +  where
    +
    +import Control.Monad (liftM2, replicateM)
    +import Control.Monad.Reader (ReaderT, runReaderT, ask)
    +import Control.Monad.State.Strict (State, evalState, get, put)
    +import Data.Int (Int64)
    +
    +import TensorFlow.Build
    +import TensorFlow.Output
    +import TensorFlow.Tensor
    +import TensorFlow.Types
    +
    +data ResultState = ResultState !OutputIx [Int64] deriving Show
    +
    +type Result = ReaderT NodeName (State ResultState)
    +
    +-- | Class of types that can be used as op outputs.
    +class BuildResult a where
    +    buildResult :: Result a
    +
    +instance (BuildResult a1, BuildResult a2) => BuildResult (a1, a2) where
    +    buildResult = (,) <$> buildResult <*> buildResult
    +
    +instance (BuildResult a1, BuildResult a2, BuildResult a3) => BuildResult (a1, a2, a3) where
    +    buildResult = (,,) <$> buildResult <*> buildResult <*> buildResult
    +
    +instance (BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4)
    +         => BuildResult (a1, a2, a3, a4) where
    +    buildResult = (,,,) <$> buildResult <*> buildResult <*> buildResult <*> buildResult
    +
    +instance (BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5)
    +         => BuildResult (a1, a2, a3, a4, a5) where
    +    buildResult = (,,,,) <$> buildResult
    +                      <*> buildResult
    +                      <*> buildResult
    +                      <*> buildResult
    +                      <*> buildResult
    +
    +instance ( BuildResult a1
    +         , BuildResult a2
    +         , BuildResult a3
    +         , BuildResult a4
    +         , BuildResult a5
    +         , BuildResult a6
    +         )
    +         => BuildResult (a1, a2, a3, a4, a5, a6) where
    +    buildResult = (,,,,,)
    +               <$> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +
    +instance ( BuildResult a1
    +         , BuildResult a2
    +         , BuildResult a3
    +         , BuildResult a4
    +         , BuildResult a5
    +         , BuildResult a6
    +         , BuildResult a7
    +         )
    +         => BuildResult (a1, a2, a3, a4, a5, a6, a7) where
    +    buildResult = (,,,,,,)
    +               <$> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +
    +instance ( BuildResult a1
    +         , BuildResult a2
    +         , BuildResult a3
    +         , BuildResult a4
    +         , BuildResult a5
    +         , BuildResult a6
    +         , BuildResult a7
    +         , BuildResult a8
    +         )
    +         => BuildResult (a1, a2, a3, a4, a5, a6, a7, a8) where
    +    buildResult = (,,,,,,,)
    +               <$> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +               <*> buildResult
    +
    +recordResult :: Result Output
    +recordResult = do
    +    o <- ask
    +    ResultState i ns <- get
    +    put $! ResultState (i+1) ns
    +    return $! output i o
    +
    +instance (TensorKind v, Rendered (Tensor v)) => BuildResult (Tensor v a) where
    +    buildResult = Tensor . pure <$> recordResult
    +
    +instance BuildResult ControlNode where
    +    buildResult = ControlNode <$> ask
    +
    +instance (TensorKind v, Rendered (Tensor v), TensorTypes as) => BuildResult (TensorList v as) where
    +  buildResult = loop (tensorTypes :: TensorTypeList as)
    +    where
    +        loop :: TensorTypeList bs -> Result (TensorList v bs)
    +        loop Nil = return Nil
    +        loop (TensorTypeProxy :/ ls) = do
    +            t <- buildResult
    +            ts <- loop ls
    +            return (t :/ ts)
    +
    +instance BuildResult a => BuildResult [a] where
    +    buildResult = do
    +        ResultState i ns <- get
    +        case ns of
    +            [] -> error $ "Ran out of counts in buildResult. " ++
    +                          "Likely misuse of buildOp."
    +            (n : rest) -> do
    +                put $! ResultState i rest
    +                replicateM (fromIntegral n) buildResult
    +
    +buildOp :: BuildResult a => [Int64] -> OpDef -> Build a
    +buildOp sizes o = do
    +    n <- addNewOp o
    +    return $ flip evalState (ResultState 0 sizes) (runReaderT buildResult n)
    +
    +-- | Returns true if all the integers in each tuple are identical.
    +-- Throws an error with a descriptive message if not.
    +eqLengthGuard :: [(String, [(String, Int)])] -> Bool
    +eqLengthGuard = all eachOk
    +  where
    +    eachOk (_, []) = True
    +    -- The next line has (== 1) . length . nub in disguise
    +    eachOk (numberAttrName, pairs@((_, x) : zs)) = all (\z -> snd z == x) zs ||
    +        error ("number_attr " ++ numberAttrName ++
    +               " contains tensors with different length " ++ show pairs)
    +
    +-----------
    +
    +
    +-- | Class of types that can be used as op outputs.
    +class PureResult a where
    +    pureResult :: ReaderT (Build OpDef) (State ResultState) a
    +
    +instance PureResult (Tensor Build a) where
    +    pureResult = do
    +        ResultState i ns <- get
    +        put $! ResultState (i+1) ns
    +        makeOp <- ask
    +        return $ Tensor $ do
    +            o <- makeOp
    +            -- TODO: unify with BuildResult (Tensor v)
    +            output i <$> getOrAddOp o
    +
    +instance (PureResult a1, PureResult a2) => PureResult (a1, a2) where
    +    pureResult = (,) <$> pureResult <*> pureResult
    +
    +instance (PureResult a1, PureResult a2, PureResult a3) => PureResult (a1, a2, a3) where
    +    pureResult = (,,) <$> pureResult <*> pureResult <*> pureResult
    +
    +instance (PureResult a1, PureResult a2, PureResult a3, PureResult a4)
    +         => PureResult (a1, a2, a3, a4) where
    +    pureResult = (,,,) <$> pureResult <*> pureResult <*> pureResult <*> pureResult
    +
    +instance (PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5)
    +         => PureResult (a1, a2, a3, a4, a5) where
    +    pureResult = (,,,,) <$> pureResult
    +                      <*> pureResult
    +                      <*> pureResult
    +                      <*> pureResult
    +                      <*> pureResult
    +
    +instance ( PureResult a1
    +         , PureResult a2
    +         , PureResult a3
    +         , PureResult a4
    +         , PureResult a5
    +         , PureResult a6
    +         )
    +         => PureResult (a1, a2, a3, a4, a5, a6) where
    +    pureResult = (,,,,,)
    +               <$> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +
    +instance ( PureResult a1
    +         , PureResult a2
    +         , PureResult a3
    +         , PureResult a4
    +         , PureResult a5
    +         , PureResult a6
    +         , PureResult a7
    +         )
    +         => PureResult (a1, a2, a3, a4, a5, a6, a7) where
    +    pureResult = (,,,,,,)
    +               <$> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +
    +instance ( PureResult a1
    +         , PureResult a2
    +         , PureResult a3
    +         , PureResult a4
    +         , PureResult a5
    +         , PureResult a6
    +         , PureResult a7
    +         , PureResult a8
    +         )
    +         => PureResult (a1, a2, a3, a4, a5, a6, a7, a8) where
    +    pureResult = (,,,,,,,)
    +               <$> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +               <*> pureResult
    +
    +instance PureResult a => PureResult [a] where
    +    pureResult = do
    +        ResultState i ns <- get
    +        case ns of
    +            [] -> error $ "Ran out of counts in pureResult. " ++
    +                          "Likely misuse of pureOp with output lists."
    +            n : rest -> do
    +                put $! ResultState i rest
    +                replicateM (fromIntegral n) pureResult
    +
    +instance TensorTypes as => PureResult (TensorList Build as) where
    +    pureResult = loop (tensorTypes :: TensorTypeList as)
    +      where
    +        loop :: TensorTypeList bs -> ReaderT (Build OpDef) (State ResultState)
    +                                        (TensorList Build bs)
    +        loop Nil = return Nil
    +        loop (TensorTypeProxy :/ ls) = do
    +            t <- pureResult
    +            ts <- loop ls
    +            return (t :/ ts)
    +
    +pureOp :: PureResult a => [Int64] -> Build OpDef -> a
    +pureOp sizes o = flip evalState (ResultState 0 sizes) (runReaderT pureResult o)
    +
    +-----
    +-- Class of types that can be used as arguments
    +
    +class BuildInputs a where
    +    buildInputs :: a -> Build [Output]
    +
    +instance BuildInputs a => BuildInputs [a] where
    +    buildInputs = fmap concat . mapM buildInputs
    +
    +instance BuildInputs (Tensor v a) where
    +    buildInputs (Tensor t) = do
    +        o <- toBuild t
    +        return [o]
    +
    +instance BuildInputs (ListOf (Tensor v) as) where
    +    buildInputs Nil = return []
    +    buildInputs (t :/ ts) = liftM2 (++) (buildInputs t) (buildInputs ts)
    +
    +----
    +
    +-- | Parameters to build an op (for example, the node name or optional attributes).
    +-- TODO: be more type safe.
    +type OpParams = OpDef -> OpDef
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.ControlFlow.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.ControlFlow.html new file mode 100644 index 0000000..267e462 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.ControlFlow.html @@ -0,0 +1,51 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE GADTs #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +
    +module TensorFlow.ControlFlow
    +    ( -- * Dependencies
    +      withControlDependencies
    +    , group
    +      -- * Operations
    +    , noOp
    +    ) where
    +
    +import TensorFlow.BuildOp
    +import TensorFlow.Build
    +import TensorFlow.Nodes
    +
    +-- | Modify a 'Build' action, such that all new ops rendered in it will depend
    +-- on the nodes in the first argument.
    +withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a
    +withControlDependencies deps act = do
    +    nodes <- build $ getNodes deps
    +    withNodeDependencies nodes act
    +
    +-- TODO(judahjacobson): Reimplement withDependencies.
    +
    +-- | Create an op that groups multiple operations.
    +--
    +-- When this op finishes, all ops in the input @n@ have finished.  This op has
    +-- no output.
    +group :: (MonadBuild m, Nodes t) => t -> m ControlNode
    +group deps = withControlDependencies deps noOp
    +
    +-- | Does nothing.  Only useful as a placeholder for control edges.
    +noOp :: MonadBuild m => m ControlNode
    +noOp = build $ buildOp [] $ opDef "NoOp"
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Core.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Core.html new file mode 100644 index 0000000..d6f7f94 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Core.html @@ -0,0 +1,93 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE ExplicitNamespaces #-}
    +
    +-- | The core functionality of TensorFlow.
    +--
    +-- Unless you are defining ops, you do not need to import other modules from
    +-- this package.
    +--
    +-- Basic ops are provided in the tensorflow-ops and tensorflow-core-ops
    +-- packages.
    +module TensorFlow.Core
    +    ( -- * Session
    +      Session
    +    , Options
    +    , sessionConfig
    +    , sessionTarget
    +    , sessionTracer
    +    , runSession
    +    , runSessionWithOptions
    +      -- ** Building graphs
    +    , MonadBuild(..)
    +      -- ** Running graphs
    +    , Fetchable
    +    , Nodes
    +    , run
    +    , run_
    +    , Feed
    +    , feed
    +    , runWithFeeds
    +    , runWithFeeds_
    +      -- ** Async
    +    , asyncProdNodes
    +
    +      -- * Build
    +    , Build
    +    , BuildT
    +    , render
    +    , asGraphDef
    +    , addGraphDef
    +    , opName
    +    , opAttr
    +    , addInitializer
    +      -- * Tensor
    +    , ControlNode
    +    , Tensor
    +    , Value
    +    , Ref
    +    , value
    +    , tensorFromName
    +    , expr
    +      -- ** Element types
    +    , TensorType
    +    , TensorData
    +    , TensorDataType(decodeTensorData, encodeTensorData)
    +    , ResourceHandle
    +    , Scalar(..)
    +    , Shape(..)
    +    , OneOf
    +    , type (/=)
    +
    +      -- * Op combinators
    +    , colocateWith
    +    , Device(..)
    +    , withDevice
    +    , withNameScope
    +      -- ** Dependencies
    +    , withControlDependencies
    +    , group
    +      -- ** Misc
    +    , noOp
    +    ) where
    +
    +import TensorFlow.Build
    +import TensorFlow.ControlFlow
    +import TensorFlow.Nodes
    +import TensorFlow.Output
    +import TensorFlow.Session
    +import TensorFlow.Tensor
    +import TensorFlow.Types
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.FFI.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.FFI.html new file mode 100644 index 0000000..bc2b734 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.FFI.html @@ -0,0 +1,267 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE DeriveDataTypeable #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +
    +module TensorFlow.Internal.FFI
    +    ( TensorFlowException(..)
    +    , Raw.Session
    +    , withSession
    +    , extendGraph
    +    , run
    +    , TensorData(..)
    +    , setSessionConfig
    +    , setSessionTarget
    +    , getAllOpList
    +      -- * Internal helper.
    +    , useProtoAsVoidPtrLen
    +    )
    +    where
    +
    +import Control.Concurrent.Async (Async, async, cancel, waitCatch)
    +import Control.Concurrent.MVar (MVar, modifyMVarMasked_, newMVar, takeMVar)
    +import Control.Monad (when)
    +import Control.Monad.Catch (MonadMask, Exception, throwM, bracket, finally, mask_)
    +import Control.Monad.IO.Class (MonadIO, liftIO)
    +import Data.Bits (Bits, toIntegralSized)
    +import Data.Int (Int64)
    +import Data.Maybe (fromMaybe)
    +import Data.Typeable (Typeable)
    +import Data.Word (Word8)
    +import Foreign (Ptr, FunPtr, nullPtr, castPtr)
    +import Foreign.C.String (CString)
    +import Foreign.ForeignPtr (newForeignPtr, newForeignPtr_, withForeignPtr)
    +import Foreign.Marshal.Alloc (free)
    +import Foreign.Marshal.Array (withArrayLen, peekArray, mallocArray, copyArray)
    +import System.IO.Unsafe (unsafePerformIO)
    +import qualified Data.ByteString as B
    +import qualified Data.Text as T
    +import qualified Data.Text.Encoding as T
    +import qualified Data.Text.Encoding.Error as T
    +import qualified Data.Vector.Storable as S
    +import qualified Data.Vector.Storable.Mutable as M
    +
    +import Data.ProtoLens (Message, encodeMessage)
    +import Proto.Tensorflow.Core.Framework.Graph (GraphDef)
    +import Proto.Tensorflow.Core.Framework.Types (DataType(..))
    +import Proto.Tensorflow.Core.Protobuf.Config (ConfigProto)
    +
    +import qualified TensorFlow.Internal.Raw as Raw
    +
    +data TensorFlowException = TensorFlowException Raw.Code T.Text
    +    deriving (Show, Eq, Typeable)
    +
    +instance Exception TensorFlowException
    +
    +-- | All of the data needed to represent a tensor.
    +data TensorData = TensorData
    +    { tensorDataDimensions :: [Int64]
    +    , tensorDataType       :: !DataType
    +    , tensorDataBytes      :: !(S.Vector Word8)
    +    }
    +  deriving (Show, Eq)
    +
    +-- | Runs the given action after creating a session with options
    +-- populated by the given optionSetter.
    +withSession :: (MonadIO m, MonadMask m)
    +            => (Raw.SessionOptions -> IO ())
    +            -> ((IO () -> IO ()) -> Raw.Session -> m a)
    +            -- ^ The action can spawn concurrent tasks which will
    +            -- be canceled before withSession returns.
    +            -> m a
    +withSession optionSetter action = do
    +    drain <- liftIO $ newMVar []
    +    let cleanup s =
    +        -- Closes the session to nudge the pending run calls to fail and exit.
    +            finally (checkStatus (Raw.closeSession s)) $ do
    +                runners <- takeMVar drain
    +                -- Collects all runners before deleting the session.
    +                mapM_ shutDownRunner runners
    +                checkStatus (Raw.deleteSession s)
    +    let bracketIO x y = bracket (liftIO x) (liftIO . y)
    +    bracketIO Raw.newSessionOptions Raw.deleteSessionOptions $ \options -> do
    +        bracketIO
    +            (optionSetter options >> checkStatus (Raw.newSession options))
    +            cleanup
    +            (action (asyncCollector drain))
    +
    +asyncCollector :: MVar [Async ()] -> IO () -> IO ()
    +asyncCollector drain runner = modifyMVarMasked_ drain launchAndRecord
    +    where
    +      launchAndRecord restRunners = (: restRunners) <$> async runner
    +
    +shutDownRunner :: Async () -> IO ()
    +shutDownRunner r = do
    +    cancel r
    +    -- TODO(gnezdo): manage exceptions better than print.
    +    either print (const (return ())) =<< waitCatch r
    +
    +extendGraph :: Raw.Session -> GraphDef -> IO ()
    +extendGraph session pb =
    +    useProtoAsVoidPtrLen pb $ \ptr len ->
    +        checkStatus $ Raw.extendGraph session ptr len
    +
    +
    +run :: Raw.Session
    +    -> [(B.ByteString, TensorData)] -- ^ Feeds.
    +    -> [B.ByteString]               -- ^ Fetches.
    +    -> [B.ByteString]               -- ^ Targets.
    +    -> IO [TensorData]
    +run session feeds fetches targets = do
    +    let nullTensor = Raw.Tensor nullPtr
    +    -- Use mask to avoid leaking input tensors before they are passed to 'run'
    +    -- and output tensors before they are passed to 'createTensorData'.
    +    mask_ $
    +        -- Feeds
    +        withStringArrayLen (fst <$> feeds) $ \feedsLen feedNames ->
    +        mapM (createRawTensor . snd) feeds >>= \feedTensors ->
    +        withArrayLen feedTensors $ \_ cFeedTensors ->
    +        -- Fetches.
    +        withStringArrayLen fetches $ \fetchesLen fetchNames ->
    +        -- tensorOuts is an array of null Tensor pointers that will be filled
    +        -- by the call to Raw.run.
    +        withArrayLen (replicate fetchesLen nullTensor) $ \_ tensorOuts ->
    +        -- Targets.
    +        withStringArrayLen targets $ \targetsLen ctargets -> do
    +            checkStatus $ Raw.run
    +                session
    +                nullPtr
    +                feedNames cFeedTensors (safeConvert feedsLen)
    +                fetchNames tensorOuts (safeConvert fetchesLen)
    +                ctargets (safeConvert targetsLen)
    +                nullPtr
    +            mapM_ Raw.deleteTensor feedTensors
    +            outTensors <- peekArray fetchesLen tensorOuts
    +            mapM createTensorData outTensors
    +
    +
    +-- Internal.
    +
    +
    +-- | Same as 'fromIntegral', but throws an error if conversion is "lossy".
    +safeConvert ::
    +    forall a b. (Show a, Show b, Bits a, Bits b, Integral a, Integral b)
    +    => a -> b
    +safeConvert x =
    +    fromMaybe
    +    (error ("Failed to convert " ++ show x ++ ", got " ++
    +            show (fromIntegral x :: b)))
    +    (toIntegralSized x)
    +
    +
    +-- | Use a list of ByteString as a list of CString.
    +withStringList :: [B.ByteString] -> ([CString] -> IO a) -> IO a
    +withStringList strings fn = go strings []
    +  where
    +    go [] cs = fn (reverse cs)
    +    -- TODO(fmayle): Is it worth using unsafeAsCString here?
    +    go (x:xs) cs = B.useAsCString x $ \c -> go xs (c:cs)
    +
    +
    +-- | Use a list of ByteString as an array of CString.
    +withStringArrayLen :: [B.ByteString] -> (Int -> Ptr CString -> IO a) -> IO a
    +withStringArrayLen xs fn = withStringList xs (`withArrayLen` fn)
    +
    +
    +-- | Create a Raw.Tensor from a TensorData.
    +createRawTensor :: TensorData -> IO Raw.Tensor
    +createRawTensor (TensorData dims dt byteVec) =
    +    withArrayLen (map safeConvert dims) $ \cdimsLen cdims -> do
    +        let len = S.length byteVec
    +        dest <- mallocArray len
    +        S.unsafeWith byteVec $ \x -> copyArray dest x len
    +        Raw.newTensor (toEnum $ fromEnum dt)
    +                      cdims (safeConvert cdimsLen)
    +                      (castPtr dest) (safeConvert len)
    +                      tensorDeallocFunPtr nullPtr
    +
    +{-# NOINLINE tensorDeallocFunPtr #-}
    +tensorDeallocFunPtr :: FunPtr Raw.TensorDeallocFn
    +tensorDeallocFunPtr = unsafePerformIO $ Raw.wrapTensorDealloc $ \x _ _ -> free x
    +
    +-- | Create a TensorData from a Raw.Tensor.
    +--
    +-- Takes ownership of the Raw.Tensor.
    +-- TODO: Currently, it just makes a copy of the Tensor (and then deletes it),
    +-- since the raw pointer may refer to storage inside a mutable TensorFlow
    +-- variable.  We should avoid that copy when it's not needed; for example,
    +-- by making TensorData wrap an IOVector, and changing the code that uses it.
    +createTensorData :: Raw.Tensor -> IO TensorData
    +createTensorData t = do
    +    -- Read dimensions.
    +    numDims <- Raw.numDims t
    +    dims <- mapM (Raw.dim t) [0..numDims-1]
    +    -- Read type.
    +    dtype <- toEnum . fromEnum <$> Raw.tensorType t
    +    -- Read data.
    +    len <- safeConvert <$> Raw.tensorByteSize t
    +    bytes <- castPtr <$> Raw.tensorData t :: IO (Ptr Word8)
    +    fp <- newForeignPtr_ bytes
    +    -- Make an explicit copy of the raw data, since it might point
    +    -- to a mutable variable's memory.
    +    v <- S.freeze (M.unsafeFromForeignPtr0 fp len)
    +    Raw.deleteTensor t
    +    return $ TensorData (map safeConvert dims) dtype v
    +
    +-- | Runs the given action which does FFI calls updating a provided
    +-- status object. If the status is not OK it is thrown as
    +-- TensorFlowException.
    +checkStatus :: (Raw.Status -> IO a) -> IO a
    +checkStatus fn =
    +    bracket Raw.newStatus Raw.deleteStatus $ \status -> do
    +        result <- fn status
    +        code <- Raw.getCode status
    +        when (code /= Raw.TF_OK) $ do
    +            msg <- T.decodeUtf8With T.lenientDecode <$>
    +                   (Raw.message status >>= B.packCString)
    +            throwM $ TensorFlowException code msg
    +        return result
    +
    +setSessionConfig :: ConfigProto -> Raw.SessionOptions -> IO ()
    +setSessionConfig pb opt =
    +    useProtoAsVoidPtrLen pb $ \ptr len ->
    +        checkStatus (Raw.setConfig opt ptr len)
    +
    +setSessionTarget :: B.ByteString -> Raw.SessionOptions -> IO ()
    +setSessionTarget target = B.useAsCString target . Raw.setTarget
    +
    +-- | Serializes the given msg and provides it as (ptr,len) argument
    +-- to the given action.
    +useProtoAsVoidPtrLen :: (Message msg, Integral c, Show c, Bits c) =>
    +                        msg -> (Ptr b -> c -> IO a) -> IO a
    +useProtoAsVoidPtrLen msg f = B.useAsCStringLen (encodeMessage msg) $
    +        \(bytes, len) -> f (castPtr bytes) (safeConvert len)
    +
    +-- | Returns the serialized OpList of all OpDefs defined in this
    +-- address space.
    +getAllOpList :: IO B.ByteString
    +getAllOpList = do
    +    foreignPtr <-
    +        mask_ (newForeignPtr Raw.deleteBuffer =<< checkCall)
    +    -- Makes a copy because it is more reliable than eviscerating
    +    -- Buffer to steal its memory (including custom deallocator).
    +    withForeignPtr foreignPtr $
    +        \ptr -> B.packCStringLen =<< (,)
    +                <$> (castPtr <$> Raw.getBufferData ptr)
    +                <*> (safeConvert <$> Raw.getBufferLength ptr)
    +    where
    +      checkCall = do
    +          p <- Raw.getAllOpList
    +          when (p == nullPtr) (throwM exception)
    +          return p
    +      exception = TensorFlowException
    +                Raw.TF_UNKNOWN "GetAllOpList failure, check logs"
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.Raw.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.Raw.html new file mode 100644 index 0000000..be9d245 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.Raw.html @@ -0,0 +1,512 @@ +
    -- GENERATED by C->Haskell Compiler, version 0.28.1 Switcheroo, 1 April 2016 (Haskell)
    +-- Edit the ORIGNAL .chs file instead!
    +
    +
    +{-# LINE 1 "src/TensorFlow/Internal/Raw.chs" #-}
    +-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE ForeignFunctionInterface #-}
    +
    +module TensorFlow.Internal.Raw where
    +import qualified Foreign.C.Types as C2HSImp
    +import qualified Foreign.Ptr as C2HSImp
    +import qualified Foreign.Storable as C2HSImp
    +
    +
    +
    +
    +
    +import Foreign
    +import Foreign.C
    +
    +data DataType = TF_FLOAT
    +              | TF_DOUBLE
    +              | TF_INT32
    +              | TF_UINT8
    +              | TF_INT16
    +              | TF_INT8
    +              | TF_STRING
    +              | TF_COMPLEX64
    +              | TF_COMPLEX
    +              | TF_INT64
    +              | TF_BOOL
    +              | TF_QINT8
    +              | TF_QUINT8
    +              | TF_QINT32
    +              | TF_BFLOAT16
    +              | TF_QINT16
    +              | TF_QUINT16
    +              | TF_UINT16
    +              | TF_COMPLEX128
    +              | TF_HALF
    +              | TF_RESOURCE
    +  deriving (Show,Eq)
    +instance Enum DataType where
    +  succ TF_FLOAT = TF_DOUBLE
    +  succ TF_DOUBLE = TF_INT32
    +  succ TF_INT32 = TF_UINT8
    +  succ TF_UINT8 = TF_INT16
    +  succ TF_INT16 = TF_INT8
    +  succ TF_INT8 = TF_STRING
    +  succ TF_STRING = TF_COMPLEX64
    +  succ TF_COMPLEX64 = TF_INT64
    +  succ TF_COMPLEX = TF_INT64
    +  succ TF_INT64 = TF_BOOL
    +  succ TF_BOOL = TF_QINT8
    +  succ TF_QINT8 = TF_QUINT8
    +  succ TF_QUINT8 = TF_QINT32
    +  succ TF_QINT32 = TF_BFLOAT16
    +  succ TF_BFLOAT16 = TF_QINT16
    +  succ TF_QINT16 = TF_QUINT16
    +  succ TF_QUINT16 = TF_UINT16
    +  succ TF_UINT16 = TF_COMPLEX128
    +  succ TF_COMPLEX128 = TF_HALF
    +  succ TF_HALF = TF_RESOURCE
    +  succ TF_RESOURCE = error "DataType.succ: TF_RESOURCE has no successor"
    +
    +  pred TF_DOUBLE = TF_FLOAT
    +  pred TF_INT32 = TF_DOUBLE
    +  pred TF_UINT8 = TF_INT32
    +  pred TF_INT16 = TF_UINT8
    +  pred TF_INT8 = TF_INT16
    +  pred TF_STRING = TF_INT8
    +  pred TF_COMPLEX64 = TF_STRING
    +  pred TF_COMPLEX = TF_STRING
    +  pred TF_INT64 = TF_COMPLEX64
    +  pred TF_BOOL = TF_INT64
    +  pred TF_QINT8 = TF_BOOL
    +  pred TF_QUINT8 = TF_QINT8
    +  pred TF_QINT32 = TF_QUINT8
    +  pred TF_BFLOAT16 = TF_QINT32
    +  pred TF_QINT16 = TF_BFLOAT16
    +  pred TF_QUINT16 = TF_QINT16
    +  pred TF_UINT16 = TF_QUINT16
    +  pred TF_COMPLEX128 = TF_UINT16
    +  pred TF_HALF = TF_COMPLEX128
    +  pred TF_RESOURCE = TF_HALF
    +  pred TF_FLOAT = error "DataType.pred: TF_FLOAT has no predecessor"
    +
    +  enumFromTo from to = go from
    +    where
    +      end = fromEnum to
    +      go v = case compare (fromEnum v) end of
    +                 LT -> v : go (succ v)
    +                 EQ -> [v]
    +                 GT -> []
    +
    +  enumFrom from = enumFromTo from TF_RESOURCE
    +
    +  fromEnum TF_FLOAT = 1
    +  fromEnum TF_DOUBLE = 2
    +  fromEnum TF_INT32 = 3
    +  fromEnum TF_UINT8 = 4
    +  fromEnum TF_INT16 = 5
    +  fromEnum TF_INT8 = 6
    +  fromEnum TF_STRING = 7
    +  fromEnum TF_COMPLEX64 = 8
    +  fromEnum TF_COMPLEX = 8
    +  fromEnum TF_INT64 = 9
    +  fromEnum TF_BOOL = 10
    +  fromEnum TF_QINT8 = 11
    +  fromEnum TF_QUINT8 = 12
    +  fromEnum TF_QINT32 = 13
    +  fromEnum TF_BFLOAT16 = 14
    +  fromEnum TF_QINT16 = 15
    +  fromEnum TF_QUINT16 = 16
    +  fromEnum TF_UINT16 = 17
    +  fromEnum TF_COMPLEX128 = 18
    +  fromEnum TF_HALF = 19
    +  fromEnum TF_RESOURCE = 20
    +
    +  toEnum 1 = TF_FLOAT
    +  toEnum 2 = TF_DOUBLE
    +  toEnum 3 = TF_INT32
    +  toEnum 4 = TF_UINT8
    +  toEnum 5 = TF_INT16
    +  toEnum 6 = TF_INT8
    +  toEnum 7 = TF_STRING
    +  toEnum 8 = TF_COMPLEX64
    +  toEnum 9 = TF_INT64
    +  toEnum 10 = TF_BOOL
    +  toEnum 11 = TF_QINT8
    +  toEnum 12 = TF_QUINT8
    +  toEnum 13 = TF_QINT32
    +  toEnum 14 = TF_BFLOAT16
    +  toEnum 15 = TF_QINT16
    +  toEnum 16 = TF_QUINT16
    +  toEnum 17 = TF_UINT16
    +  toEnum 18 = TF_COMPLEX128
    +  toEnum 19 = TF_HALF
    +  toEnum 20 = TF_RESOURCE
    +  toEnum unmatched = error ("DataType.toEnum: Cannot match " ++ show unmatched)
    +
    +{-# LINE 24 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +data Code = TF_OK
    +          | TF_CANCELLED
    +          | TF_UNKNOWN
    +          | TF_INVALID_ARGUMENT
    +          | TF_DEADLINE_EXCEEDED
    +          | TF_NOT_FOUND
    +          | TF_ALREADY_EXISTS
    +          | TF_PERMISSION_DENIED
    +          | TF_RESOURCE_EXHAUSTED
    +          | TF_FAILED_PRECONDITION
    +          | TF_ABORTED
    +          | TF_OUT_OF_RANGE
    +          | TF_UNIMPLEMENTED
    +          | TF_INTERNAL
    +          | TF_UNAVAILABLE
    +          | TF_DATA_LOSS
    +          | TF_UNAUTHENTICATED
    +  deriving (Show,Eq)
    +instance Enum Code where
    +  succ TF_OK = TF_CANCELLED
    +  succ TF_CANCELLED = TF_UNKNOWN
    +  succ TF_UNKNOWN = TF_INVALID_ARGUMENT
    +  succ TF_INVALID_ARGUMENT = TF_DEADLINE_EXCEEDED
    +  succ TF_DEADLINE_EXCEEDED = TF_NOT_FOUND
    +  succ TF_NOT_FOUND = TF_ALREADY_EXISTS
    +  succ TF_ALREADY_EXISTS = TF_PERMISSION_DENIED
    +  succ TF_PERMISSION_DENIED = TF_RESOURCE_EXHAUSTED
    +  succ TF_RESOURCE_EXHAUSTED = TF_FAILED_PRECONDITION
    +  succ TF_FAILED_PRECONDITION = TF_ABORTED
    +  succ TF_ABORTED = TF_OUT_OF_RANGE
    +  succ TF_OUT_OF_RANGE = TF_UNIMPLEMENTED
    +  succ TF_UNIMPLEMENTED = TF_INTERNAL
    +  succ TF_INTERNAL = TF_UNAVAILABLE
    +  succ TF_UNAVAILABLE = TF_DATA_LOSS
    +  succ TF_DATA_LOSS = TF_UNAUTHENTICATED
    +  succ TF_UNAUTHENTICATED = error "Code.succ: TF_UNAUTHENTICATED has no successor"
    +
    +  pred TF_CANCELLED = TF_OK
    +  pred TF_UNKNOWN = TF_CANCELLED
    +  pred TF_INVALID_ARGUMENT = TF_UNKNOWN
    +  pred TF_DEADLINE_EXCEEDED = TF_INVALID_ARGUMENT
    +  pred TF_NOT_FOUND = TF_DEADLINE_EXCEEDED
    +  pred TF_ALREADY_EXISTS = TF_NOT_FOUND
    +  pred TF_PERMISSION_DENIED = TF_ALREADY_EXISTS
    +  pred TF_RESOURCE_EXHAUSTED = TF_PERMISSION_DENIED
    +  pred TF_FAILED_PRECONDITION = TF_RESOURCE_EXHAUSTED
    +  pred TF_ABORTED = TF_FAILED_PRECONDITION
    +  pred TF_OUT_OF_RANGE = TF_ABORTED
    +  pred TF_UNIMPLEMENTED = TF_OUT_OF_RANGE
    +  pred TF_INTERNAL = TF_UNIMPLEMENTED
    +  pred TF_UNAVAILABLE = TF_INTERNAL
    +  pred TF_DATA_LOSS = TF_UNAVAILABLE
    +  pred TF_UNAUTHENTICATED = TF_DATA_LOSS
    +  pred TF_OK = error "Code.pred: TF_OK has no predecessor"
    +
    +  enumFromTo from to = go from
    +    where
    +      end = fromEnum to
    +      go v = case compare (fromEnum v) end of
    +                 LT -> v : go (succ v)
    +                 EQ -> [v]
    +                 GT -> []
    +
    +  enumFrom from = enumFromTo from TF_UNAUTHENTICATED
    +
    +  fromEnum TF_OK = 0
    +  fromEnum TF_CANCELLED = 1
    +  fromEnum TF_UNKNOWN = 2
    +  fromEnum TF_INVALID_ARGUMENT = 3
    +  fromEnum TF_DEADLINE_EXCEEDED = 4
    +  fromEnum TF_NOT_FOUND = 5
    +  fromEnum TF_ALREADY_EXISTS = 6
    +  fromEnum TF_PERMISSION_DENIED = 7
    +  fromEnum TF_RESOURCE_EXHAUSTED = 8
    +  fromEnum TF_FAILED_PRECONDITION = 9
    +  fromEnum TF_ABORTED = 10
    +  fromEnum TF_OUT_OF_RANGE = 11
    +  fromEnum TF_UNIMPLEMENTED = 12
    +  fromEnum TF_INTERNAL = 13
    +  fromEnum TF_UNAVAILABLE = 14
    +  fromEnum TF_DATA_LOSS = 15
    +  fromEnum TF_UNAUTHENTICATED = 16
    +
    +  toEnum 0 = TF_OK
    +  toEnum 1 = TF_CANCELLED
    +  toEnum 2 = TF_UNKNOWN
    +  toEnum 3 = TF_INVALID_ARGUMENT
    +  toEnum 4 = TF_DEADLINE_EXCEEDED
    +  toEnum 5 = TF_NOT_FOUND
    +  toEnum 6 = TF_ALREADY_EXISTS
    +  toEnum 7 = TF_PERMISSION_DENIED
    +  toEnum 8 = TF_RESOURCE_EXHAUSTED
    +  toEnum 9 = TF_FAILED_PRECONDITION
    +  toEnum 10 = TF_ABORTED
    +  toEnum 11 = TF_OUT_OF_RANGE
    +  toEnum 12 = TF_UNIMPLEMENTED
    +  toEnum 13 = TF_INTERNAL
    +  toEnum 14 = TF_UNAVAILABLE
    +  toEnum 15 = TF_DATA_LOSS
    +  toEnum 16 = TF_UNAUTHENTICATED
    +  toEnum unmatched = error ("Code.toEnum: Cannot match " ++ show unmatched)
    +
    +{-# LINE 25 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +
    +-- Status.
    +newtype Status = Status (C2HSImp.Ptr (Status))
    +{-# LINE 29 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +newStatus :: IO Status
    +newStatus = tFNewStatus
    +{-# LINE 32 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +deleteStatus :: Status -> IO ()
    +deleteStatus = tFDeleteStatus
    +{-# LINE 35 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +setStatus :: Status -> Code -> CString -> IO ()
    +setStatus s c = tFSetStatus s (fromIntegral $ fromEnum c)
    +
    +getCode :: Status -> IO Code
    +getCode s = toEnum . fromIntegral <$> tFGetCode s
    +
    +message :: Status -> IO CString
    +message = tFMessage
    +{-# LINE 44 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +
    +-- Buffer.
    +data Buffer
    +type BufferPtr = C2HSImp.Ptr (Buffer)
    +{-# LINE 49 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +getBufferData :: BufferPtr -> IO (Ptr ())
    +getBufferData = (\ptr -> do {C2HSImp.peekByteOff ptr 0 :: IO (C2HSImp.Ptr ())})
    +{-# LINE 52 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +getBufferLength :: BufferPtr -> IO CULong
    +getBufferLength =(\ptr -> do {C2HSImp.peekByteOff ptr 8 :: IO C2HSImp.CULong})
    +{-# LINE 55 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +-- Tensor.
    +newtype Tensor = Tensor (C2HSImp.Ptr (Tensor))
    +{-# LINE 58 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +instance Storable Tensor where
    +    sizeOf (Tensor t) = sizeOf t
    +    alignment (Tensor t) = alignment t
    +    peek p = fmap Tensor (peek (castPtr p))
    +    poke p (Tensor t) = poke (castPtr p) t
    +
    +-- A synonym for the int64_t type, which is used in the TensorFlow API.
    +-- On some platforms it's `long`; on others (e.g., Mac OS X) it's `long long`;
    +-- and as far as Haskell is concerned, those are distinct types (`CLong` vs
    +-- `CLLong`).
    +type CInt64 = (C2HSImp.CLong)
    +{-# LINE 70 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +newTensor :: DataType
    +          -> Ptr CInt64   -- dimensions array
    +          -> CInt         -- num dimensions
    +          -> Ptr ()       -- data
    +          -> CULong       -- data len
    +          -> FunPtr (Ptr () -> CULong -> Ptr () -> IO ())  -- deallocator
    +          -> Ptr ()       -- deallocator arg
    +          -> IO Tensor
    +newTensor dt = tFNewTensor (fromIntegral $ fromEnum dt)
    +
    +deleteTensor :: Tensor -> IO ()
    +deleteTensor = tFDeleteTensor
    +{-# LINE 83 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +tensorType :: Tensor -> IO DataType
    +tensorType t = toEnum . fromIntegral <$> tFTensorType t
    +
    +numDims :: Tensor -> IO CInt
    +numDims = tFNumDims
    +{-# LINE 89 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +dim :: Tensor -> CInt -> IO CInt64
    +dim = tFDim
    +{-# LINE 92 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +tensorByteSize :: Tensor -> IO CULong
    +tensorByteSize = tFTensorByteSize
    +{-# LINE 95 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +tensorData :: Tensor -> IO (Ptr ())
    +tensorData = tFTensorData
    +{-# LINE 98 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +
    +-- Session Options.
    +newtype SessionOptions = SessionOptions (C2HSImp.Ptr (SessionOptions))
    +{-# LINE 102 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +newSessionOptions :: IO SessionOptions
    +newSessionOptions = tFNewSessionOptions
    +{-# LINE 105 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +setTarget :: SessionOptions -> CString -> IO ()
    +setTarget = tFSetTarget
    +{-# LINE 108 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +setConfig :: SessionOptions -> Ptr () -> CULong -> Status -> IO ()
    +setConfig = tFSetConfig
    +{-# LINE 111 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +deleteSessionOptions :: SessionOptions -> IO ()
    +deleteSessionOptions = tFDeleteSessionOptions
    +{-# LINE 114 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +
    +-- Session.
    +newtype Session = Session (C2HSImp.Ptr (Session))
    +{-# LINE 118 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +newSession :: SessionOptions -> Status -> IO Session
    +newSession = tFNewDeprecatedSession
    +{-# LINE 121 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +closeSession :: Session -> Status -> IO ()
    +closeSession = tFCloseDeprecatedSession
    +{-# LINE 124 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +deleteSession :: Session -> Status -> IO ()
    +deleteSession = tFDeleteDeprecatedSession
    +{-# LINE 127 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +extendGraph :: Session -> Ptr () -> CULong -> Status -> IO ()
    +extendGraph = tFExtendGraph
    +{-# LINE 130 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +run :: Session
    +    -> BufferPtr                          -- RunOptions proto.
    +    -> Ptr CString -> Ptr Tensor -> CInt  -- Input (names, tensors, count).
    +    -> Ptr CString -> Ptr Tensor -> CInt  -- Output (names, tensors, count).
    +    -> Ptr CString -> CInt                -- Target nodes (names, count).
    +    -> BufferPtr                          -- RunMetadata proto.
    +    -> Status
    +    -> IO ()
    +run = tFRun
    +{-# LINE 140 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +-- FFI helpers.
    +type TensorDeallocFn = Ptr () -> CULong -> Ptr () -> IO ()
    +foreign import ccall "wrapper"
    +    wrapTensorDealloc :: TensorDeallocFn -> IO (FunPtr TensorDeallocFn)
    +
    +
    +-- | Get the OpList of all OpDefs defined in this address space.
    +-- Returns a BufferPtr, ownership of which is transferred to the caller
    +-- (and can be freed using deleteBuffer).
    +--
    +-- The data in the buffer will be the serialized OpList proto for ops registered
    +-- in this address space.
    +getAllOpList :: IO BufferPtr
    +getAllOpList = tFGetAllOpList
    +{-# LINE 155 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +foreign import ccall "&TF_DeleteBuffer"
    +  deleteBuffer :: FunPtr (BufferPtr -> IO ())
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewStatus"
    +  tFNewStatus :: (IO (Status))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteStatus"
    +  tFDeleteStatus :: ((Status) -> (IO ()))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetStatus"
    +  tFSetStatus :: ((Status) -> (C2HSImp.CInt -> ((C2HSImp.Ptr C2HSImp.CChar) -> (IO ()))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_GetCode"
    +  tFGetCode :: ((Status) -> (IO C2HSImp.CInt))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Message"
    +  tFMessage :: ((Status) -> (IO (C2HSImp.Ptr C2HSImp.CChar)))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewTensor"
    +  tFNewTensor :: (C2HSImp.CInt -> ((C2HSImp.Ptr C2HSImp.CLong) -> (C2HSImp.CInt -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((C2HSImp.FunPtr ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((C2HSImp.Ptr ()) -> (IO ()))))) -> ((C2HSImp.Ptr ()) -> (IO (Tensor)))))))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteTensor"
    +  tFDeleteTensor :: ((Tensor) -> (IO ()))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorType"
    +  tFTensorType :: ((Tensor) -> (IO C2HSImp.CInt))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NumDims"
    +  tFNumDims :: ((Tensor) -> (IO C2HSImp.CInt))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Dim"
    +  tFDim :: ((Tensor) -> (C2HSImp.CInt -> (IO C2HSImp.CLong)))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorByteSize"
    +  tFTensorByteSize :: ((Tensor) -> (IO C2HSImp.CULong))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorData"
    +  tFTensorData :: ((Tensor) -> (IO (C2HSImp.Ptr ())))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewSessionOptions"
    +  tFNewSessionOptions :: (IO (SessionOptions))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetTarget"
    +  tFSetTarget :: ((SessionOptions) -> ((C2HSImp.Ptr C2HSImp.CChar) -> (IO ())))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetConfig"
    +  tFSetConfig :: ((SessionOptions) -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((Status) -> (IO ())))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteSessionOptions"
    +  tFDeleteSessionOptions :: ((SessionOptions) -> (IO ()))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewDeprecatedSession"
    +  tFNewDeprecatedSession :: ((SessionOptions) -> ((Status) -> (IO (Session))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_CloseDeprecatedSession"
    +  tFCloseDeprecatedSession :: ((Session) -> ((Status) -> (IO ())))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteDeprecatedSession"
    +  tFDeleteDeprecatedSession :: ((Session) -> ((Status) -> (IO ())))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_ExtendGraph"
    +  tFExtendGraph :: ((Session) -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((Status) -> (IO ())))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Run"
    +  tFRun :: ((Session) -> ((BufferPtr) -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> ((C2HSImp.Ptr (Tensor)) -> (C2HSImp.CInt -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> ((C2HSImp.Ptr (Tensor)) -> (C2HSImp.CInt -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> (C2HSImp.CInt -> ((BufferPtr) -> ((Status) -> (IO ())))))))))))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_GetAllOpList"
    +  tFGetAllOpList :: (IO (BufferPtr))
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.VarInt.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.VarInt.html new file mode 100644 index 0000000..9241254 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Internal.VarInt.html @@ -0,0 +1,51 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE BangPatterns #-}
    +
    +{-|
    +Module      : TensorFlow.Internal.VarInt
    +Description : Encoders and decoders for varint types.
    +
    +Originally taken from internal proto-lens code.
    +-}
    +module TensorFlow.Internal.VarInt
    +    ( getVarInt
    +    , putVarInt
    +    ) where
    +
    +import Data.Attoparsec.ByteString as Parse
    +import Data.Bits
    +import Data.ByteString.Lazy.Builder as Builder
    +import Data.Monoid ((<>))
    +import Data.Word (Word64)
    +
    +-- | Decode an unsigned varint.
    +getVarInt :: Parser Word64
    +getVarInt = loop 1 0
    +  where
    +    loop !s !n = do
    +        b <- anyWord8
    +        let n' = n + s * fromIntegral (b .&. 127)
    +        if (b .&. 128) == 0
    +            then return n'
    +            else loop (128*s) n'
    +
    +-- | Encode a Word64.
    +putVarInt :: Word64 -> Builder
    +putVarInt n
    +    | n < 128 = Builder.word8 (fromIntegral n)
    +    | otherwise = Builder.word8 (fromIntegral $ n .&. 127 .|. 128)
    +                      <> putVarInt (n `shiftR` 7)
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Nodes.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Nodes.html new file mode 100644 index 0000000..feca89c --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Nodes.html @@ -0,0 +1,147 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE DataKinds #-}
    +{-# LANGUAGE FlexibleContexts #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE MultiParamTypeClasses #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TypeFamilies #-}
    +{-# LANGUAGE TypeOperators #-}
    +{-# LANGUAGE UndecidableInstances #-}  -- For Fetchable (TensorExpr a)
    +module TensorFlow.Nodes where
    +
    +import Control.Applicative (liftA2, liftA3)
    +import Data.Functor.Identity (Identity)
    +import Data.Map.Strict (Map)
    +import Data.Monoid ((<>))
    +import Data.Set (Set)
    +import Data.Text (Text)
    +import qualified Data.Map.Strict as Map
    +import qualified Data.Set as Set
    +
    +import TensorFlow.Build
    +import TensorFlow.Output
    +import TensorFlow.Tensor
    +import TensorFlow.Types
    +import qualified TensorFlow.Internal.FFI as FFI
    +
    +-- | Types that contain ops which can be run.
    +class Nodes t where
    +    getNodes :: t -> Build (Set NodeName)
    +
    +-- | Types that tensor representations (e.g. 'Tensor', 'ControlNode') can be
    +-- fetched into.
    +--
    +-- Includes collections of tensors (e.g. tuples).
    +class Nodes t => Fetchable t a where
    +    getFetch :: t -> Build (Fetch a)
    +
    +-- | Fetch action. Keeps track of what needs to be fetched and how to decode
    +-- the fetched data.
    +data Fetch a = Fetch
    +          { -- | Nodes to fetch
    +            fetches :: Set Text
    +            -- | Function to create an 'a' from the fetched data.
    +          , fetchRestore :: Map Text FFI.TensorData -> a
    +          }
    +
    +instance Functor Fetch where
    +    fmap f (Fetch fetch restore) = Fetch fetch (f . restore)
    +
    +instance Applicative Fetch where
    +    pure x = Fetch Set.empty (const x)
    +    Fetch fetch restore <*> Fetch fetch' restore' =
    +        Fetch (fetch <> fetch') (restore <*> restore')
    +
    +nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b
    +nodesUnion = fmap (foldMap id) . sequenceA
    +
    +instance (Nodes t1, Nodes t2) => Nodes (t1, t2) where
    +    getNodes (x, y) = nodesUnion [getNodes x, getNodes y]
    +
    +instance (Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) where
    +    getNodes (x, y, z) = nodesUnion [getNodes x, getNodes y, getNodes z]
    +
    +instance (Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) where
    +    getFetch (x, y) = liftA2 (,) <$> getFetch x <*> getFetch y
    +
    +instance (Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3)
    +         => Fetchable (t1, t2, t3) (a1, a2, a3) where
    +    getFetch (x, y, z) =
    +        liftA3 (,,) <$> getFetch x <*> getFetch y <*> getFetch z
    +
    +instance Nodes t => Nodes [t] where
    +    getNodes = nodesUnion . map getNodes
    +
    +instance Fetchable t a => Fetchable [t] [a] where
    +    getFetch ts  = sequenceA <$> mapM getFetch ts
    +
    +instance Nodes t => Nodes (Maybe t) where
    +    getNodes = nodesUnion . fmap getNodes
    +
    +instance Fetchable t a => Fetchable (Maybe t) (Maybe a) where
    +    getFetch = fmap sequenceA . mapM getFetch
    +
    +instance Nodes ControlNode where
    +    getNodes (ControlNode o) = pure $ Set.singleton o
    +
    +-- We use the constraint @(a ~ ())@ to help with type inference.  For example,
    +-- if @t :: ControlNode@, then this constraint ensures that @run t :: Session
    +-- ()@.  If we used @instance Fetchable ControlNode ()@ instead, then that
    +-- expression would be ambiguous without explicitly specifying the return type.
    +instance a ~ () => Fetchable ControlNode a where
    +    getFetch _ = return $ pure ()
    +
    +instance Nodes (ListOf f '[]) where
    +    getNodes _ = return Set.empty
    +
    +instance (Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f (a ': as)) where
    +    getNodes (x :/ xs) = liftA2 Set.union (getNodes x) (getNodes xs)
    +
    +instance l ~ List '[] => Fetchable (ListOf f '[]) l where
    +    getFetch _ = return $ pure Nil
    +
    +instance (Fetchable (f t) a, Fetchable (ListOf f ts) (List as), i ~ Identity)
    +    => Fetchable (ListOf f (t ': ts)) (ListOf i (a ': as)) where
    +    getFetch (x :/ xs) = liftA2 (\y ys -> y /:/ ys) <$> getFetch x <*> getFetch xs
    +
    +instance Nodes (Tensor v a) where
    +    getNodes (Tensor o) = Set.singleton . outputNodeName <$> toBuild o
    +
    +fetchTensorVector :: forall a v . (TensorType a)
    +                  => Tensor v a -> Build (Fetch (TensorData a))
    +fetchTensorVector (Tensor o) = do
    +    outputName <- encodeOutput <$> toBuild o
    +    pure $ Fetch (Set.singleton outputName) $ \tensors ->
    +        let tensorData = tensors Map.! outputName
    +            expectedType = tensorType (undefined :: a)
    +            actualType = FFI.tensorDataType tensorData
    +            badTypeError = error $ "Bad tensor type: expected "
    +                                   ++ show expectedType
    +                                   ++ ", got "
    +                                   ++ show actualType
    +        in if expectedType /= actualType
    +               then badTypeError
    +               else TensorData tensorData
    +
    +-- The constraint "a ~ a'" means that the input/output of fetch can constrain
    +-- the TensorType of each other.
    +instance (TensorType a, a ~ a') => Fetchable (Tensor v a) (TensorData a') where
    +    getFetch = fetchTensorVector
    +
    +instance (TensorType a, TensorDataType s a, a ~ a') => Fetchable (Tensor v a) (s a') where
    +    getFetch t = fmap decodeTensorData <$> fetchTensorVector t
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Output.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Output.html new file mode 100644 index 0000000..69142c0 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Output.html @@ -0,0 +1,128 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE GeneralizedNewtypeDeriving #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE Rank2Types #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +
    +module TensorFlow.Output
    +    ( ControlNode(..)
    +    , Device(..)
    +    -- * Ops
    +    , NodeName(..)
    +    , OpDef(..)
    +    , opName
    +    , opType
    +    , opAttr
    +    , opInputs
    +    , opControlInputs
    +    , OpType(..)
    +    , OutputIx(..)
    +    , Output(..)
    +    , output
    +    , PendingNodeName(..)
    +    )  where
    +
    +import qualified Data.Map.Strict as Map
    +import Data.String (IsString(..))
    +import Data.Text (Text)
    +import qualified Data.Text as Text
    +import Lens.Family2 (Lens')
    +import Lens.Family2.Unchecked (lens)
    +import Proto.Tensorflow.Core.Framework.AttrValue (AttrValue(..))
    +import Data.Default (def)
    +import TensorFlow.Types (Attribute, attrLens)
    +
    +-- | A type of graph node which has no outputs. These nodes are
    +-- valuable for causing side effects when they are run.
    +newtype ControlNode = ControlNode { unControlNode :: NodeName }
    +
    +-- | The type of op of a node in the graph.  This corresponds to the proto field
    +-- NodeDef.op.
    +newtype OpType = OpType { unOpType :: Text }
    +    deriving (Eq, Ord, Show)
    +
    +instance IsString OpType where
    +    fromString = OpType . Text.pack
    +
    +-- | An output of a TensorFlow node.
    +data Output = Output {outputIndex :: !OutputIx, outputNodeName :: !NodeName}
    +    deriving (Eq, Ord, Show)
    +
    +output :: OutputIx -> NodeName -> Output
    +output = Output
    +
    +newtype OutputIx = OutputIx { unOutputIx :: Int }
    +    deriving (Eq, Ord, Num, Enum, Show)
    +
    +-- | A device that a node can be assigned to.
    +-- There's a naming convention where the device names
    +-- are constructed from job and replica names.
    +newtype Device = Device {deviceName :: Text}
    +    deriving (Eq, Ord, IsString)
    +
    +instance Show Device where
    +    show (Device d) = show d
    +
    +-- | Op definition. This corresponds somewhat to the 'NodeDef' proto.
    +data OpDef = OpDef
    +    { _opName :: !PendingNodeName
    +    , _opType :: !OpType
    +    , _opAttrs :: !(Map.Map Text AttrValue)
    +    , _opInputs :: [Output]
    +    , _opControlInputs :: [NodeName]
    +    }  deriving (Eq, Ord)
    +
    +-- | The name specified for an unrendered Op.  If an Op has an
    +-- ImplicitName, it will be assigned based on the opType plus a
    +-- unique identifier.  Does not contain the "scope" prefix.
    +data PendingNodeName = ExplicitName !Text | ImplicitName
    +    deriving (Eq, Ord, Show)
    +
    +instance IsString PendingNodeName where
    +    fromString = ExplicitName . fromString
    +
    +-- | The name of a node in the graph.  This corresponds to the proto field
    +-- NodeDef.name.  Includes the scope prefix (if any) and a unique identifier
    +-- (if the node was implicitly named).
    +newtype NodeName = NodeName { unNodeName :: Text }
    +    deriving (Eq, Ord, Show)
    +
    +opName :: Lens' OpDef PendingNodeName
    +opName = lens _opName (\o x -> o {_opName = x})
    +
    +opType :: Lens' OpDef OpType
    +opType = lens _opType (\o x -> o { _opType = x})
    +
    +opAttr :: Attribute a => Text -> Lens' OpDef a
    +opAttr n = lens _opAttrs (\o x -> o {_opAttrs = x})
    +              . lens (Map.findWithDefault def n) (flip (Map.insert n))
    +              . attrLens
    +
    +opInputs :: Lens' OpDef [Output]
    +opInputs = lens _opInputs (\o x -> o {_opInputs = x})
    +
    +opControlInputs :: Lens' OpDef [NodeName]
    +opControlInputs = lens _opControlInputs (\o x -> o {_opControlInputs = x})
    +
    +-- TODO(gnezdo): IsString instance is weird and we should move that
    +-- code into a Build function
    +instance IsString Output where
    +    fromString s = case break (==':') s of
    +        (n, ':':ixStr) | [(ix, "" :: String)] <- read ixStr
    +                         -> Output (fromInteger ix) $ assigned n
    +        _ -> Output 0 $ assigned s
    +     where assigned = NodeName . Text.pack
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Session.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Session.html new file mode 100644 index 0000000..3be7e7c --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Session.html @@ -0,0 +1,218 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE GeneralizedNewtypeDeriving #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE Rank2Types #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TupleSections #-}
    +
    +module TensorFlow.Session (
    +    Session,
    +    SessionT,
    +    Options,
    +    sessionConfig,
    +    sessionTarget,
    +    sessionTracer,
    +    runSession,
    +    runSessionWithOptions,
    +    MonadBuild(..),
    +    extend,
    +    addGraphDef,
    +    run,
    +    runWithFeeds,
    +    run_,
    +    runWithFeeds_,
    +    asyncProdNodes,
    +    ) where
    +
    +import Control.Monad (forever, unless, void)
    +import Control.Monad.Catch (MonadThrow, MonadCatch, MonadMask)
    +import Control.Monad.IO.Class (MonadIO, liftIO)
    +import Control.Monad.Trans.Class (MonadTrans, lift)
    +import Control.Monad.Trans.Reader (ReaderT(..), ask, asks)
    +import Data.ByteString (ByteString)
    +import Data.Default (Default, def)
    +import Data.Monoid ((<>))
    +import Data.ProtoLens (showMessage)
    +import Data.Set (Set)
    +import Data.Text.Encoding (encodeUtf8)
    +import Lens.Family2 (Lens', (^.), (&), (.~))
    +import Lens.Family2.Unchecked (lens)
    +import Proto.Tensorflow.Core.Framework.Graph (GraphDef, node)
    +import Proto.Tensorflow.Core.Protobuf.Config (ConfigProto)
    +import TensorFlow.Build
    +import TensorFlow.Nodes
    +import TensorFlow.Output (NodeName, unNodeName)
    +import TensorFlow.Tensor
    +
    +import qualified Data.ByteString.Builder as Builder
    +import qualified Data.Map.Strict as Map
    +import qualified Data.Set as Set
    +import qualified TensorFlow.Internal.FFI as FFI
    +
    +-- | An action for logging.
    +type Tracer = Builder.Builder -> IO ()
    +
    +-- Common state threaded through the session.
    +data SessionState
    +    = SessionState {
    +          rawSession :: FFI.Session
    +        , asyncCollector :: IO () -> IO ()
    +          -- ^ Starts the given action concurrently.
    +        , tracer :: Tracer
    +        }
    +
    +newtype SessionT m a
    +    = Session (ReaderT SessionState (BuildT m) a)
    +    deriving (Functor, Applicative, Monad, MonadIO, MonadThrow, MonadCatch,
    +              MonadMask)
    +
    +instance MonadTrans SessionT where
    +  lift = Session . lift . lift
    +
    +type Session = SessionT IO
    +
    +-- | Run 'Session' actions in a new TensorFlow session.
    +--
    +-- Equivalent to @'runSessionWithOptions' 'def'@.
    +runSession :: (MonadMask m, MonadIO m) => SessionT m a -> m a
    +runSession = runSessionWithOptions def
    +
    +-- | Customization for session. Use the lenses to update:
    +-- 'sessionTarget', 'sessionTracer', 'sessionConfig'.
    +data Options = Options
    +    { _sessionTarget :: ByteString
    +    , _sessionConfig :: ConfigProto
    +    , _sessionTracer :: Tracer
    +    }
    +
    +-- Defaults: in-process target (empty string), default proto config, and a
    +-- tracer that discards all output.
    +instance Default Options where
    +    def = Options
    +          { _sessionTarget = ""
    +          , _sessionConfig = def
    +          , _sessionTracer = const (return ())
    +          }
    +
    +-- | Target can be: "local", ip:port, host:port.
    +-- The set of supported factories depends on the linked in libraries.
    +sessionTarget :: Lens' Options ByteString
    +sessionTarget = lens _sessionTarget setter
    +  where setter opts t = opts { _sessionTarget = t }
    +
    +-- | Uses the specified config for the created session.
    +sessionConfig :: Lens' Options ConfigProto
    +sessionConfig = lens _sessionConfig setter
    +  where setter opts c = opts { _sessionConfig = c }
    +
    +-- | Uses the given logger to monitor session progress.
    +sessionTracer :: Lens' Options Tracer
    +sessionTracer = lens _sessionTracer setter
    +  where setter opts t = opts { _sessionTracer = t }
    +
    +-- | Run 'Session' actions in a new TensorFlow session created with
    +-- the given option setter actions ('sessionTarget', 'sessionConfig').
    +runSessionWithOptions :: (MonadMask m, MonadIO m) => Options -> SessionT m a -> m a
    +runSessionWithOptions options (Session m) =
    +    FFI.withSession applyOptions $
    +        \as rs ->
    +            -- 'as' runs an action concurrently; 'rs' is the raw FFI session.
    +            let initState = SessionState rs as (options ^. sessionTracer)
    +            in evalBuildT (runReaderT m initState)
    +  where applyOptions opt = do
    +            FFI.setSessionTarget (options ^. sessionTarget) opt
    +            FFI.setSessionConfig (options ^. sessionConfig) opt
    +
    +-- | Graph nodes can be built directly inside a session; they stay pending
    +-- until flushed to the C API by 'extend'.
    +instance Monad m => MonadBuild (SessionT m) where
    +    build = Session . lift . build
    +
    +-- | Add all pending rendered nodes to the TensorFlow graph and runs
    +-- any pending initializers.
    +--
    +-- Note that run, runWithFeeds, etc. will all call this function implicitly.
    +extend :: MonadIO m => SessionT m ()
    +extend = do
    +    session <- Session (asks rawSession)
    +    trace <- Session (asks tracer)
    +    nodesToExtend <- build flushNodeBuffer
    +    -- Only call into the FFI when there is actually something to add.
    +    unless (null nodesToExtend) $ liftIO $ do
    +        let graphDef = (def :: GraphDef) & node .~ nodesToExtend
    +        trace ("Session.extend " <> Builder.string8 (showMessage graphDef))
    +        FFI.extendGraph session graphDef
    +    -- Now that all the nodes are created, run the initializers.
    +    initializers <- build flushInitializers
    +    unless (null initializers) $
    +        void $ liftIO $ FFI.run session [] [] (toNodeNames initializers)
    +
    +-- | Run a subgraph 't', rendering any dependent nodes that aren't already
    +-- rendered, and fetch the corresponding values for 'a'.
    +run :: (MonadIO m, Fetchable t a) => t -> SessionT m a
    +run = runWithFeeds []
    +
    +-- | Run a subgraph 't', rendering any dependent nodes that aren't already
    +-- rendered, feed the given input values, and fetch the corresponding result
    +-- values for 'a'.
    +runWithFeeds :: (MonadIO m, Fetchable t a) => [Feed] -> t -> SessionT m a
    +runWithFeeds feeds t = do
    +    ns <- build $ getNodes t
    +    -- Note that this call to "fetch" shouldn't affect the following "extend"
    +    -- call, since all nodes in t and its inputs/deps will be rendered by the
    +    -- above call to getNodes.
    +    fetch <- build $ getFetch t
    +    runFetchWithFeeds feeds ns fetch
    +
    +-- | Shared driver for 'runWithFeeds' and 'runWithFeeds_': extends the graph,
    +-- executes the given target nodes with the feeds, and restores the fetched
    +-- tensors into an 'a'.
    +runFetchWithFeeds :: MonadIO m => [Feed] -> Set NodeName -> Fetch a -> SessionT m a
    +runFetchWithFeeds feeds target (Fetch fetch restore) = do
    +    extend
    +    let feeds' = fixFeeds feeds
    +    let fetchNames = encodeUtf8 <$> Set.toList fetch
    +        targetNames = toNodeNames $ Set.toList target
    +    session <- Session (asks rawSession)
    +    runResult <- liftIO $ FFI.run session
    +                                  feeds'
    +                                  fetchNames
    +                                  targetNames
    +    -- FFI.run returns results in the same order as the fetch names, so zipping
    +    -- against Set.toList reassociates each result with its fetch.
    +    let resultTensorsMap = Map.fromList $ zip (Set.toList fetch) runResult
    +    return $ restore resultTensorsMap
    +
    +-- | Encode node names for the FFI layer.
    +toNodeNames :: [NodeName] -> [ByteString]
    +toNodeNames = map (encodeUtf8 . unNodeName)
    +
    +-- | Run a subgraph 't', rendering and extending any dependent nodes that aren't
    +-- already rendered.  This behaves like 'run' except that it doesn't do any
    +-- fetches.
    +run_ :: (MonadIO m, Nodes t) => t -> SessionT m ()
    +run_ = runWithFeeds_ []
    +
    +-- | Run a subgraph 't', rendering any dependent nodes that aren't already
    +-- rendered, and feed the given input values.  This behaves like
    +-- 'runWithFeeds' except that it doesn't do any fetches.
    +runWithFeeds_ :: (MonadIO m, Nodes t) => [Feed] -> t -> SessionT m ()
    +runWithFeeds_ feeds t = do
    +    ns <- build $ getNodes t
    +    runFetchWithFeeds feeds ns (pure ())
    +
    +-- | Resolve each 'Feed' into the (encoded output name, tensor data) pair
    +-- expected by the FFI layer.
    +fixFeeds :: [Feed] -> [(ByteString, FFI.TensorData)]
    +fixFeeds feeds = [ (encodeUtf8 (encodeOutput o), d) | Feed o d <- feeds ]
    +
    +-- | Starts a concurrent thread which evaluates the given Nodes
    +-- forever until runSession exits or an exception occurs. Graph
    +-- extension happens synchronously, but the resultant run proceeds as
    +-- a separate thread.
    +asyncProdNodes :: (MonadIO m, Nodes t)
    +                  => t  -- ^ Node to evaluate concurrently.
    +                  -> SessionT m ()
    +asyncProdNodes nodes = do
    +    target <- build (getNodes nodes)
    +    -- Extend synchronously so the target nodes exist before the loop starts.
    +    extend
    +    let targetNames = toNodeNames $ Set.toList target
    +    state <- Session ask
    +    -- The loop feeds nothing and fetches nothing; it only runs the targets.
    +    let loop = forever (void (FFI.run (rawSession state) [] [] targetNames))
    +    liftIO (asyncCollector state loop)
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Tensor.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Tensor.html new file mode 100644 index 0000000..16615e6 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Tensor.html @@ -0,0 +1,201 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE DataKinds #-}
    +{-# LANGUAGE FlexibleContexts #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE FunctionalDependencies #-}
    +{-# LANGUAGE GADTs #-}
    +{-# LANGUAGE DeriveFunctor #-}
    +{-# LANGUAGE KindSignatures #-}
    +{-# LANGUAGE MultiParamTypeClasses #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE Rank2Types #-}
    +{-# LANGUAGE TypeFamilies #-}
    +{-# LANGUAGE TypeOperators #-}
    +{-# LANGUAGE UndecidableInstances #-}  -- For the Render class
    +
    +module TensorFlow.Tensor where
    +
    +import Data.ByteString (ByteString)
    +import Data.String (IsString(..))
    +import qualified Data.Text as Text
    +import Lens.Family2 ((^.))
    +import Lens.Family2.State ((%=), use)
    +
    +import Proto.Tensorflow.Core.Framework.NodeDef (device)
    +import TensorFlow.Build
    +import TensorFlow.Output (Output, NodeName, outputNodeName, Device(..))
    +import TensorFlow.Types
    +    ( TensorType
    +    , TensorData(..)
    +    , ListOf(..)
    +    )
    +import qualified TensorFlow.Internal.FFI as FFI
    +
    +-- | A named output of a TensorFlow operation.
    +--
    +-- The type parameter @a@ is the type of the elements in the 'Tensor'.  The
    +-- parameter @v@ is either:
    +--
    +--   * 'Build': An unrendered, immutable value.
    +--   * 'Value': A rendered, immutable value.
    +--   * 'Ref': A rendered stateful handle (e.g., a variable).
    +--
    +-- Note that 'expr', 'value', 'render' and 'renderValue' can help convert between
    +-- the different types of 'Tensor'.
    +data Tensor v a where
    +    Tensor :: TensorKind v => {tensorOutput :: v Output} -> Tensor v a
    +
    +-- | Wrapper marking a rendered, immutable tensor output.  Structurally this
    +-- is the identity functor.
    +newtype Value a = Value {runValue :: a}
    +    deriving Functor
    +
    +instance Applicative Value where
    +    pure = Value
    +    Value f <*> Value x = Value $ f x
    +
    +instance Monad Value where
    +    f >>= g = g $ runValue f
    +
    +-- | Wrapper marking a rendered, stateful tensor handle (e.g. a variable).
    +-- Also structurally the identity functor.
    +newtype Ref a = Ref {runRef :: a}
    +    deriving Functor
    +
    +instance Applicative Ref where
    +    pure = Ref
    +    Ref f <*> Ref x = Ref $ f x
    +
    +instance Monad Ref where
    +    f >>= g = g $ runRef f
    +
    +-- | Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op.
    +value :: Tensor Ref a -> Tensor Value a
    +value (Tensor o) = Tensor $ Value $ runRef o
    +
    +-- | Render (if needed) a tensor of any kind and return it as a
    +-- 'Tensor Value'.
    +renderValue :: MonadBuild m => Tensor v a -> m (Tensor Value a)
    +renderValue (Tensor o) = render $ Tensor $ toBuild o
    +
    +-- | A pair of a 'Tensor' and some data that should be fed into that 'Tensor'
    +-- when running the graph.
    +data Feed = Feed Output FFI.TensorData
    +
    +-- | A class ensuring that a given tensor is rendered, i.e., has a fixed
    +-- name, device, etc.
    +class Rendered t where
    +    renderedOutput :: t a -> Output
    +
    +instance Rendered (Tensor Value) where
    +    renderedOutput = runValue . tensorOutput
    +
    +instance Rendered (Tensor Ref) where
    +    renderedOutput = runRef . tensorOutput
    +
    +-- | The name of the node producing a rendered tensor.
    +tensorNodeName :: Rendered t => t a -> NodeName
    +tensorNodeName = outputNodeName . renderedOutput
    +
    +
    +-- | Create a 'Feed' for feeding the given data into a 'Tensor' when running
    +-- the graph.
    +--
    +-- Note that if a 'Tensor' is rendered, its identity may change; so feeding the
    +-- rendered 'Tensor' may be different than feeding the original 'Tensor'.
    +feed :: Rendered t => t a -> TensorData a -> Feed
    +feed t (TensorData td) = Feed (renderedOutput t) td
    +
    +-- | Create a 'Tensor' for a given name.  This can be used to reference nodes
    +-- in a 'GraphDef' that was loaded via 'addGraphDef'.
    +-- TODO(judahjacobson): add more safety checks here.
    +tensorFromName :: TensorKind v => Text.Text -> Tensor v a
    +tensorFromName name = Tensor (pure (fromString (Text.unpack name)))
    +
    +-- | Like 'tensorFromName', but type-restricted to 'Value'.
    +tensorValueFromName :: Text.Text -> Tensor Value a
    +tensorValueFromName name = tensorFromName name
    +
    +-- | Like 'tensorFromName', but type-restricted to 'Ref'.
    +tensorRefFromName :: Text.Text -> Tensor Ref a
    +tensorRefFromName name = tensorFromName name
    +
    +-- | A heterogeneous list of tensors sharing the same kind @v@.
    +type TensorList v = ListOf (Tensor v)
    +
    +-- | Collect the outputs of every tensor in a rendered 'TensorList'.
    +tensorListOutputs :: Rendered (Tensor v) => TensorList v as -> [Output]
    +tensorListOutputs Nil = []
    +tensorListOutputs (t :/ ts) = renderedOutput t : tensorListOutputs ts
    +
    +-- | Places all nodes rendered in the given 'Build' action on the same
    +-- device as the given Tensor (see also 'withDevice'). Make sure that
    +-- the action has side effects of rendering the desired tensors. A pure
    +-- return would not have the desired effect.
    +colocateWith :: (MonadBuild m, Rendered t) => t b -> m a -> m a
    +colocateWith t x = do
    +    -- Look up the rendered node and reuse its device string.
    +    d <- build $ Device . (^. device)
    +               <$> lookupNode (outputNodeName $ renderedOutput t)
    +    withDevice (Just d) x
    +
    +
    +-- | Render a 'Tensor', fixing its name, scope, device and control inputs from
    +-- the 'MonadBuild' context.  Also renders any dependencies of the 'Tensor' that
    +-- weren't already rendered.
    +--
    +-- This operation is idempotent; calling 'render' on the same input in the same
    +-- context will produce the same result.  However, rendering the same
    +-- @Tensor Build@ in two different contexts may result in two different
    +-- @Tensor Value@s.
    +render :: MonadBuild m => Tensor Build a -> m (Tensor Value a)
    +render (Tensor t) = Tensor . Value <$> build t
    +
    +-- | Convert any kind of 'Tensor' into an unrendered @Tensor Build@.
    +-- TODO: better name.
    +expr :: TensorKind v => Tensor v a -> Tensor Build a
    +expr (Tensor o) = Tensor $ toBuild o
    +
    +-- | Records the given summary action in Build for retrieval with
    +-- Summary protocol buffer in string form. For safety, use the
    +-- pre-composed functions: Logging.scalarSummary and
    +-- Logging.histogramSummary.
    +addSummary :: (MonadBuild m, TensorKind v) => Tensor v ByteString -- ^ A 'SummaryTensor'
    +                        -> m ()
    +addSummary t = build $ do
    +    -- TODO: more generic way
    +    o <- toBuild $ tensorOutput t
    +    summaries %= (o :)
    +
    +-- | Retrieves the summary ops collected thus far. Typically this only
    +-- happens once, but if 'TensorFlow.Session.buildWithSummary' is used
    +-- repeatedly, the values accumulate.
    +collectAllSummaries :: MonadBuild m => m [SummaryTensor]
    +collectAllSummaries = build $ map (Tensor . Value) <$> use summaries
    +
    +-- | Synonym for the tensors that return serialized Summary proto.
    +type SummaryTensor = Tensor Value ByteString
    +
    +-- | An internal class for kinds of Tensors.
    +class Monad v => TensorKind v where
    +    toBuild :: v a -> Build a
    +
    +instance TensorKind Value where
    +    toBuild = return . runValue
    +
    +instance TensorKind Ref where
    +    toBuild = return . runRef
    +
    +instance TensorKind Build where
    +    toBuild = id
    +
    +
    +-- | Types which can be converted to `Tensor`.
    +class ToTensor t where
    +    toTensor :: TensorType a => t a -> Tensor Build a
    +
    +instance TensorKind v => ToTensor (Tensor v) where
    +    toTensor = expr
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Types.html b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Types.html new file mode 100644 index 0000000..36b8002 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/TensorFlow.Types.html @@ -0,0 +1,566 @@ +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE ConstraintKinds #-}
    +{-# LANGUAGE CPP #-}
    +{-# LANGUAGE DataKinds #-}
    +{-# LANGUAGE FlexibleContexts #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE GADTs #-}
    +{-# LANGUAGE GeneralizedNewtypeDeriving #-}
    +{-# LANGUAGE MonoLocalBinds #-}
    +{-# LANGUAGE MultiParamTypeClasses #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TypeFamilies #-}
    +{-# LANGUAGE TypeOperators #-}
    +-- We use UndecidableInstances for type families with recursive definitions
    +-- like "\\".  Those instances will terminate since each equation unwraps one
    +-- cons cell of a type-level list.
    +{-# LANGUAGE UndecidableInstances #-}
    +
    +module TensorFlow.Types
    +    ( TensorType(..)
    +    , TensorData(..)
    +    , TensorDataType(..)
    +    , Scalar(..)
    +    , Shape(..)
    +    , protoShape
    +    , Attribute(..)
    +    , DataType(..)
    +    , ResourceHandle
    +    -- * Lists
    +    , ListOf(..)
    +    , List
    +    , (/:/)
    +    , TensorTypeProxy(..)
    +    , TensorTypes(..)
    +    , TensorTypeList
    +    , fromTensorTypeList
    +    , fromTensorTypes
    +    -- * Type constraints
    +    , OneOf
    +    , type (/=)
    +    , OneOfs
    +    -- ** Implementation of constraints
    +    , TypeError
    +    , ExcludedCase
    +    , NoneOf
    +    , type (\\)
    +    , Delete
    +    , AllTensorTypes
    +    ) where
    +
    +import Data.Functor.Identity (Identity(..))
    +import Data.Complex (Complex)
    +import Data.Default (def)
    +import Data.Int (Int8, Int16, Int32, Int64)
    +import Data.Monoid ((<>))
    +import Data.Proxy (Proxy(..))
    +import Data.String (IsString)
    +import Data.Word (Word8, Word16, Word64)
    +import Foreign.Storable (Storable)
    +import GHC.Exts (Constraint, IsList(..))
    +import Lens.Family2 (Lens', view, (&), (.~))
    +import Lens.Family2.Unchecked (iso)
    +import Text.Printf (printf)
    +import qualified Data.Attoparsec.ByteString as Atto
    +import Data.ByteString (ByteString)
    +import qualified Data.ByteString as B
    +import Data.ByteString.Builder (Builder)
    +import qualified Data.ByteString.Builder as Builder
    +import qualified Data.ByteString.Lazy as L
    +import qualified Data.Vector as V
    +import qualified Data.Vector.Storable as S
    +import Proto.Tensorflow.Core.Framework.AttrValue
    +    ( AttrValue(..)
    +    , AttrValue'ListValue(..)
    +    , b
    +    , f
    +    , i
    +    , s
    +    , list
    +    , type'
    +    , shape
    +    , tensor
    +    )
    +import Proto.Tensorflow.Core.Framework.ResourceHandle
    +    (ResourceHandleProto)
    +import Proto.Tensorflow.Core.Framework.Tensor as Tensor
    +    ( TensorProto(..)
    +    , boolVal
    +    , doubleVal
    +    , floatVal
    +    , intVal
    +    , int64Val
    +    , resourceHandleVal
    +    , stringVal
    +    , stringVal
    +    )
    +import Proto.Tensorflow.Core.Framework.TensorShape
    +    ( TensorShapeProto(..)
    +    , dim
    +    , size
    +    )
    +import Proto.Tensorflow.Core.Framework.Types (DataType(..))
    +
    +import TensorFlow.Internal.VarInt (getVarInt, putVarInt)
    +import qualified TensorFlow.Internal.FFI as FFI
    +
    +type ResourceHandle = ResourceHandleProto
    +
    +-- | The class of scalar types supported by tensorflow.
    +class TensorType a where
    +    tensorType :: a -> DataType
    +    -- ^ The proto enum value for this element type.
    +    tensorRefType :: a -> DataType
    +    -- ^ The corresponding @*_REF@ enum value, used for stateful handles.
    +    tensorVal :: Lens' TensorProto [a]
    +    -- ^ Lens onto the repeated proto field holding values of this type.
    +
    +instance TensorType Float where
    +    tensorType _ = DT_FLOAT
    +    tensorRefType _ = DT_FLOAT_REF
    +    tensorVal = floatVal
    +
    +instance TensorType Double where
    +    tensorType _ = DT_DOUBLE
    +    tensorRefType _ = DT_DOUBLE_REF
    +    tensorVal = doubleVal
    +
    +instance TensorType Int32 where
    +    tensorType _ = DT_INT32
    +    tensorRefType _ = DT_INT32_REF
    +    tensorVal = intVal
    +
    +instance TensorType Int64 where
    +    tensorType _ = DT_INT64
    +    tensorRefType _ = DT_INT64_REF
    +    tensorVal = int64Val
    +
    +-- | View a list of 'Int32' (the proto's repeated @int_val@ field) as a list
    +-- of any other integral type, converting in both directions.
    +integral :: Integral a => Lens' [Int32] [a]
    +integral = iso (fmap fromIntegral) (fmap fromIntegral)
    +
    +instance TensorType Word8 where
    +    tensorType _ = DT_UINT8
    +    tensorRefType _ = DT_UINT8_REF
    +    tensorVal = intVal . integral
    +
    +instance TensorType Word16 where
    +    tensorType _ = DT_UINT16
    +    tensorRefType _ = DT_UINT16_REF
    +    tensorVal = intVal . integral
    +
    +instance TensorType Int16 where
    +    tensorType _ = DT_INT16
    +    tensorRefType _ = DT_INT16_REF
    +    tensorVal = intVal . integral
    +
    +instance TensorType Int8 where
    +    tensorType _ = DT_INT8
    +    tensorRefType _ = DT_INT8_REF
    +    tensorVal = intVal . integral
    +
    +instance TensorType ByteString where
    +    tensorType _ = DT_STRING
    +    tensorRefType _ = DT_STRING_REF
    +    tensorVal = stringVal
    +
    +instance TensorType Bool where
    +    tensorType _ = DT_BOOL
    +    tensorRefType _ = DT_BOOL_REF
    +    tensorVal = boolVal
    +
    +instance TensorType (Complex Float) where
    +    tensorType _ = DT_COMPLEX64
    +    -- Fixed: previously returned the non-REF 'DT_COMPLEX64' here, unlike
    +    -- every other instance, which returns the matching @*_REF@ enum value.
    +    tensorRefType _ = DT_COMPLEX64_REF
    +    tensorVal = error "TODO (Complex Float)"
    +
    +instance TensorType (Complex Double) where
    +    tensorType _ = DT_COMPLEX128
    +    -- Fixed: same @*_REF@ inconsistency as the Complex Float instance.
    +    tensorRefType _ = DT_COMPLEX128_REF
    +    tensorVal = error "TODO (Complex Double)"
    +
    +instance TensorType ResourceHandle where
    +    tensorType _ = DT_RESOURCE
    +    tensorRefType _ = DT_RESOURCE_REF
    +    tensorVal = resourceHandleVal
    +
    +-- | Tensor data with the correct memory layout for tensorflow.
    +newtype TensorData a = TensorData { unTensorData :: FFI.TensorData }
    +
    +-- | Types that can be converted to and from 'TensorData'.
    +--
    +-- 'S.Vector' is the most efficient to encode/decode for most element types.
    +class TensorType a => TensorDataType s a where
    +    -- | Decode the bytes of a 'TensorData' into an 's'.
    +    decodeTensorData :: TensorData a -> s a
    +    -- | Encode an 's' into a 'TensorData'.
    +    --
    +    -- The values should be in row major order, e.g.,
    +    --
    +    --   element 0:   index (0, ..., 0)
    +    --   element 1:   index (0, ..., 1)
    +    --   ...
    +    encodeTensorData :: Shape -> s a -> TensorData a
    +
    +-- All types, besides ByteString and Bool, are encoded as simple arrays and we
    +-- can use Vector.Storable to encode/decode by type casting pointers.
    +
    +-- TODO(fmayle): Assert that the data type matches the return type.
    +-- | Decode by reinterpreting the raw bytes as a vector of @a@.
    +-- TODO(fmayle): Assert that the data type matches the return type.
    +simpleDecode :: Storable a => TensorData a -> S.Vector a
    +simpleDecode = S.unsafeCast . FFI.tensorDataBytes . unTensorData
    +
    +-- | Encode by casting the vector's bytes directly; errors out if the
    +-- vector length does not match the product of the shape's dimensions.
    +simpleEncode :: forall a . (TensorType a, Storable a)
    +             => Shape -> S.Vector a -> TensorData a
    +simpleEncode (Shape xs) v =
    +    if product xs /= fromIntegral (S.length v)
    +        then error $ printf
    +            "simpleEncode: bad vector length for shape %v: expected=%d got=%d"
    +            (show xs) (product xs) (S.length v)
    +        else TensorData (FFI.TensorData xs dt (S.unsafeCast v))
    +  where
    +    dt = tensorType (undefined :: a)
    +
    +-- All simple scalar types share the cast-based codec above.
    +instance TensorDataType S.Vector Float where
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorDataType S.Vector Double where
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorDataType S.Vector Int8 where
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorDataType S.Vector Int16 where
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorDataType S.Vector Int32 where
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorDataType S.Vector Int64 where
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorDataType S.Vector Word8 where
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorDataType S.Vector Word16 where
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +-- TODO: Haskell and tensorflow use different byte sizes for bools, which makes
    +-- encoding more expensive. It may make sense to define a custom boolean type.
    +instance TensorDataType S.Vector Bool where
    +    decodeTensorData =
    +        S.convert . S.map (/= 0) . FFI.tensorDataBytes . unTensorData
    +    encodeTensorData (Shape xs) =
    +        TensorData . FFI.TensorData xs DT_BOOL . S.map fromBool . S.convert
    +      where
    +        fromBool x = if x then 1 else 0 :: Word8
    +
    +-- Boxed vectors go through the storable codec by conversion.
    +instance {-# OVERLAPPABLE #-} (Storable a, TensorDataType S.Vector a, TensorType a)
    +    => TensorDataType V.Vector a where
    +    decodeTensorData = (S.convert :: S.Vector a -> V.Vector a) . decodeTensorData
    +    encodeTensorData x = encodeTensorData x . (S.convert :: V.Vector a -> S.Vector a)
    +
    +instance {-# OVERLAPPING #-} TensorDataType V.Vector (Complex Float) where
    +    decodeTensorData = error "TODO (Complex Float)"
    +    encodeTensorData = error "TODO (Complex Float)"
    +
    +instance {-# OVERLAPPING #-} TensorDataType V.Vector (Complex Double) where
    +    decodeTensorData = error "TODO (Complex Double)"
    +    encodeTensorData = error "TODO (Complex Double)"
    +
    +instance {-# OVERLAPPING #-} TensorDataType V.Vector ByteString where
    +    -- Encoded data layout (described in third_party/tensorflow/c/c_api.h):
    +    --   table offsets for each element :: [Word64]
    +    --   at each element offset:
    +    --     string length :: VarInt64
    +    --     string data   :: [Word8]
    +    decodeTensorData tensorData =
    +        either (\err -> error $ "Malformed TF_STRING tensor; " ++ err) id $
    +            if expected /= count
    +                then Left $ "decodeTensorData for ByteString count mismatch " ++
    +                            show (expected, count)
    +                else V.mapM decodeString (S.convert offsets)
    +      where
    +        expected = S.length offsets
    +        -- Number of elements implied by the tensor's dimensions.
    +        count = fromIntegral $ product $ FFI.tensorDataDimensions
    +                    $ unTensorData tensorData
    +        bytes = FFI.tensorDataBytes $ unTensorData tensorData
    +        -- The offset table occupies the first (count * 8) bytes.
    +        offsets = S.take count $ S.unsafeCast bytes :: S.Vector Word64
    +        dataBytes = B.pack $ S.toList $ S.drop (count * 8) bytes
    +        -- Offsets are relative to the start of the data region (after
    +        -- the offset table).
    +        decodeString :: Word64 -> Either String ByteString
    +        decodeString offset =
    +            let stringDataStart = B.drop (fromIntegral offset) dataBytes
    +            in Atto.eitherResult $ Atto.parse stringParser stringDataStart
    +        stringParser :: Atto.Parser ByteString
    +        stringParser = getVarInt >>= Atto.take . fromIntegral
    +    encodeTensorData (Shape xs) vec =
    +        TensorData $ FFI.TensorData xs dt byteVector
    +      where
    +        dt = tensorType (undefined :: ByteString)
    +        -- Add a string to an offset table and data blob.
    +        addString :: (Builder, Builder, Word64)
    +                  -> ByteString
    +                  -> (Builder, Builder, Word64)
    +        addString (table, strings, offset) str =
    +            ( table <> Builder.word64LE offset
    +            , strings <> lengthBytes <> Builder.byteString str
    +            , offset + lengthBytesLen + strLen
    +            )
    +          where
    +            strLen = fromIntegral $ B.length str
    +            lengthBytes = putVarInt $ fromIntegral $ B.length str
    +            lengthBytesLen =
    +                fromIntegral $ L.length $ Builder.toLazyByteString lengthBytes
    +        -- Encode all strings.
    +        (table', strings', _) = V.foldl' addString (mempty, mempty, 0) vec
    +        -- Concat offset table with data.
    +        bytes = table' <> strings'
    +        -- Convert to Vector Word8.
    +        byteVector = S.fromList $ L.unpack $ Builder.toLazyByteString bytes
    +
    +-- | A single scalar element, encoded/decoded as a one-element tensor.
    +newtype Scalar a = Scalar {unScalar :: a}
    +    deriving (Show, Eq, Ord, Num, Fractional, Floating, Real, RealFloat,
    +              RealFrac, IsString)
    +
    +instance (TensorDataType V.Vector a, TensorType a) => TensorDataType Scalar a where
    +    decodeTensorData = Scalar . headFromSingleton . decodeTensorData
    +    encodeTensorData x (Scalar y) = encodeTensorData x (V.fromList [y])
    +
    +-- | Extract the sole element of a vector, erroring out if its length is
    +-- anything other than one.
    +headFromSingleton :: V.Vector a -> a
    +headFromSingleton x
    +    | V.length x == 1 = V.head x
    +    | otherwise = error $
    +                  "Unable to extract singleton from tensor of length "
    +                  ++ show (V.length x)
    +
    +
    +-- | Shape (dimensions) of a tensor.
    +newtype Shape = Shape [Int64] deriving Show
    +
    +instance IsList Shape where
    +    type Item Shape = Int64
    +    fromList = Shape . fromList
    +    toList (Shape ss) = toList ss
    +
    +-- | Lens between the proto representation of a shape and 'Shape'.
    +protoShape :: Lens' TensorShapeProto Shape
    +protoShape = iso protoToShape shapeToProto
    +  where
    +    protoToShape = Shape . fmap (view size) . view dim
    +    shapeToProto (Shape ds) = (def :: TensorShapeProto) & dim .~ fmap (\d -> def & size .~ d) ds
    +
    +
    +-- | Types that can be stored in an op attribute ('AttrValue' proto).
    +class Attribute a where
    +    attrLens :: Lens' AttrValue a
    +
    +instance Attribute Float where
    +    attrLens = f
    +
    +instance Attribute ByteString where
    +    attrLens = s
    +
    +instance Attribute Int64 where
    +    attrLens = i
    +
    +instance Attribute DataType where
    +    attrLens = type'
    +
    +instance Attribute TensorProto where
    +    attrLens = tensor
    +
    +instance Attribute Bool where
    +    attrLens = b
    +
    +instance Attribute Shape where
    +    attrLens = shape . protoShape
    +
    +-- TODO(gnezdo): support generating list(Foo) from [Foo].
    +instance Attribute AttrValue'ListValue where
    +    attrLens = list
    +
    +instance Attribute [DataType] where
    +    attrLens = list . type'
    +
    +instance Attribute [Int64] where
    +    attrLens = list . i
    +
    +-- | A heterogeneous list type.
    +data ListOf f as where
    +    Nil :: ListOf f '[]
    +    (:/) :: f a -> ListOf f as -> ListOf f (a ': as)
    +
    +infixr 5 :/
    +
    +-- | Constraint that @f@ holds for every element of the type-level list.
    +type family All f as :: Constraint where
    +    All f '[] = ()
    +    All f (a ': as) = (f a, All f as)
    +
    +-- | Map a type constructor over a type-level list.
    +type family Map f as where
    +    Map f '[] = '[]
    +    Map f (a ': as) = f a ': Map f as
    +
    +instance All Eq (Map f as) => Eq (ListOf f as) where
    +    Nil == Nil = True
    +    (x :/ xs) == (y :/ ys) = x == y && xs == ys
    +    -- Newer versions of GHC use the GADT to tell that the previous cases are
    +    -- exhaustive.
    +#if __GLASGOW_HASKELL__ < 800
    +    _ == _ = False
    +#endif
    +
    +instance All Show (Map f as) => Show (ListOf f as) where
    +    showsPrec _ Nil = showString "Nil"
    +    showsPrec d (x :/ xs) = showParen (d > 10)
    +                                $ showsPrec 6 x . showString " :/ "
    +                                    . showsPrec 6 xs
    +
    +-- | A heterogeneous list of plain values.
    +type List = ListOf Identity
    +
    +-- | Equivalent of ':/' for lists.
    +(/:/) :: a -> List as -> List (a ': as)
    +(/:/) = (:/) . Identity
    +
    +infixr 5 /:/
    +
    +-- | A 'Constraint' specifying the possible choices of a 'TensorType'.
    +--
    +-- We implement a 'Constraint' like @OneOf '[Double, Float] a@ by turning the
    +-- natural representation as a conjunction, i.e.,
    +--
    +-- @
    +--    a == Double || a == Float
    +-- @
    +--
    +-- into a disjunction like
    +--
    +-- @
    +--     a \/= Int32 && a \/= Int64 && a \/= ByteString && ...
    +-- @
    +--
    +-- using an enumeration of all the possible 'TensorType's.
    +type OneOf ts a
    +    -- Assert `TensorTypes' ts` to make error messages a little better.
    +    = (TensorType a, TensorTypes' ts, NoneOf (AllTensorTypes \\ ts) a)
    +
    +-- | Like 'OneOf', but applied pointwise to a list of types.
    +type OneOfs ts as = (TensorTypes as, TensorTypes' ts,
    +                        NoneOfs (AllTensorTypes \\ ts) as)
    +
    +type family NoneOfs ts as :: Constraint where
    +    NoneOfs ts '[] = ()
    +    NoneOfs ts (a ': as) = (NoneOf ts a, NoneOfs ts as)
    +
    +-- | A value-level witness that a type is a 'TensorType'.
    +data TensorTypeProxy a where
    +    TensorTypeProxy :: TensorType a => TensorTypeProxy a
    +
    +type TensorTypeList = ListOf TensorTypeProxy
    +
    +-- | Reify the 'DataType' of every element of a 'TensorTypeList'.
    +fromTensorTypeList :: TensorTypeList ts -> [DataType]
    +fromTensorTypeList Nil = []
    +fromTensorTypeList ((TensorTypeProxy :: TensorTypeProxy t) :/ ts)
    +    = tensorType (undefined :: t) : fromTensorTypeList ts
    +
    +fromTensorTypes :: forall as . TensorTypes as => Proxy as -> [DataType]
    +fromTensorTypes _ = fromTensorTypeList (tensorTypes :: TensorTypeList as)
    +
    +class TensorTypes (ts :: [*]) where
    +    tensorTypes :: TensorTypeList ts
    +
    +instance TensorTypes '[] where
    +    tensorTypes = Nil
    +
    +-- | A constraint that the input is a list of 'TensorTypes'.
    +instance (TensorType t, TensorTypes ts) => TensorTypes (t ': ts) where
    +    tensorTypes = TensorTypeProxy :/ tensorTypes
    +
    +-- | A simpler version of the 'TensorTypes' class, that doesn't run
    +-- afoul of @-Wsimplifiable-class-constraints@.
    +--
    +-- In more detail: the constraint @OneOf '[Double, Float] a@ leads
    +-- to the constraint @TensorTypes' '[Double, Float]@, as a safety-check
    +-- to give better error messages.  However, if @TensorTypes'@ were a class,
    +-- then GHC 8.2.1 would complain with the above warning unless @NoMonoBinds@
    +-- were enabled.  So instead, we use a separate type family for this purpose.
    +-- For more details: https://ghc.haskell.org/trac/ghc/ticket/11948
    +type family TensorTypes' (ts :: [*]) :: Constraint where
    +    -- Specialize this type family when `ts` is a long list, to avoid deeply
    +    -- nested tuples of constraints.  Works around a bug in ghc-8.0:
    +    -- https://ghc.haskell.org/trac/ghc/ticket/12175
    +    TensorTypes' (t1 ': t2 ': t3 ': t4 ': ts)
    +        = (TensorType t1, TensorType t2, TensorType t3, TensorType t4
    +              , TensorTypes' ts)
    +    TensorTypes' (t1 ': t2 ': t3 ': ts)
    +        = (TensorType t1, TensorType t2, TensorType t3, TensorTypes' ts)
    +    TensorTypes' (t1 ': t2 ': ts)
    +        = (TensorType t1, TensorType t2, TensorTypes' ts)
    +    TensorTypes' (t ': ts) = (TensorType t, TensorTypes' ts)
    +    TensorTypes' '[] = ()
    +
    +-- | A constraint checking that two types are different.
    +type family a /= b :: Constraint where
    +    a /= a = TypeError a ~ ExcludedCase
    +    a /= b = ()
    +
    +-- | Helper types to produce a reasonable type error message when the Constraint
    +-- "a /= a" fails.
    +-- TODO(judahjacobson): Use ghc-8's CustomTypeErrors for this.
    +data TypeError a
    +data ExcludedCase
    +
    +-- | An enumeration of all valid 'TensorType's.
    +type AllTensorTypes =
    +    -- NOTE: This list should be kept in sync with
    +    -- TensorFlow.OpGen.dtTypeToHaskell.
    +    -- TODO: Add support for Complex Float/Double.
    +    '[ Float
    +     , Double
    +     , Int8
    +     , Int16
    +     , Int32
    +     , Int64
    +     , Word8
    +     , Word16
    +     , ByteString
    +     , Bool
    +     ]
    +
    +-- | Removes a type from the given list of types.
    +type family Delete a as where
    +    Delete a '[] = '[]
    +    Delete a (a ': as) = Delete a as
    +    Delete a (b ': as) = b ': Delete a as
    +
    +-- | Takes the difference of two lists of types.
    +type family as \\ bs where
    +    as \\ '[] = as
    +    as \\ (b ': bs) = Delete b as \\ bs
    +
    +-- | A constraint that the type @a@ doesn't appear in the type list @ts@.
    +-- Assumes that @a@ and each of the elements of @ts@ are 'TensorType's.
    +type family NoneOf ts a :: Constraint where
    +    -- Specialize this type family when `ts` is a long list, to avoid deeply
    +    -- nested tuples of constraints.  Works around a bug in ghc-8.0:
    +    -- https://ghc.haskell.org/trac/ghc/ticket/12175
    +    NoneOf (t1 ': t2 ': t3 ': t4 ': ts) a
    +        = (a /= t1, a /= t2, a /= t3, a /= t4, NoneOf ts a)
    +    NoneOf (t1 ': t2 ': t3 ': ts) a = (a /= t1, a /= t2, a /= t3, NoneOf ts a)
    +    NoneOf (t1 ': t2 ': ts) a = (a /= t1, a /= t2, NoneOf ts a)
    +    NoneOf (t1 ': ts) a = (a /= t1, NoneOf ts a)
    +    NoneOf '[] a = ()
    +
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.2/src/highlight.js b/docs/haddock/tensorflow-0.1.0.2/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-0.1.0.2/src/style.css b/docs/haddock/tensorflow-0.1.0.2/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.2/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-0.1.0.2/synopsis.png similarity index 100% rename from docs/haddock/tensorflow-0.1.0.0/synopsis.png rename to docs/haddock/tensorflow-0.1.0.2/synopsis.png diff --git 
a/docs/haddock/tensorflow-core-ops-0.1.0.0/LICENSE b/docs/haddock/tensorflow-core-ops-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html index 1eb3a3a..9594dda 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html @@ -1,17 +1,18 @@ -TensorFlow.GenOps.Core

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.GenOps.Core

    Synopsis

    Documentation

    abort :: forall m'. MonadBuild m' => m' ControlNode

    Raise a exception to abort the process when called. If exit_without_error is true, the process will exit normally, otherwise it will exit with a SIGABORT signal.

    Returns nothing but an exception.

    abort' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode

    abs

    Arguments

    :: OneOf `[Int32, Int64, Word16, Double, Float]` t 
    => Tensor v'1 t

    x

    -> Tensor Build t

    y

    Computes the absolute value of a tensor.

    Given a tensor x, this operation returns a tensor containing the absolute +

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.GenOps.Core

    Synopsis

    Documentation

    abort :: forall m'. MonadBuild m' => m' ControlNode Source #

    Raise a exception to abort the process when called.

    If exit_without_error is true, the process will exit normally, + otherwise it will exit with a SIGABORT signal.

    Returns nothing but an exception.

    abort' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode Source #

    abs Source #

    Arguments

    :: OneOf '[Int32, Int64, Word16, Double, Float] t 
    => Tensor v'1 t

    x

    -> Tensor Build t

    y

    Computes the absolute value of a tensor.

    Given a tensor x, this operation returns a tensor containing the absolute value of each element in x. For example, if x is an input element and y is - an output element, this operation computes \(y = |x|\).

    abs'

    Arguments

    :: OneOf `[Int32, Int64, Word16, Double, Float]` t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor Build t

    y

    accumulatorApplyGradient

    Arguments

    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
    => Tensor Ref ByteString

    handle: The handle to a accumulator.

    -> Tensor v'2 Int64

    local_step: The local_step value at which the gradient was computed.

    -> Tensor v'3 dtype

    gradient: A tensor of the gradient to be accumulated.

    -> m' ControlNode 

    Applies a gradient to a given accumulator. Does not add if local_step is lesser

    than the accumulator's global_step.

    accumulatorApplyGradient'

    Arguments

    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to a accumulator.

    -> Tensor v'2 Int64

    local_step: The local_step value at which the gradient was computed.

    -> Tensor v'3 dtype

    gradient: A tensor of the gradient to be accumulated.

    -> m' ControlNode 

    accumulatorNumAccumulated

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> m' (Tensor Value Int32)

    num_accumulated: The number of gradients aggregated in the given accumulator.

    Returns the number of gradients aggregated in the given accumulators.

    accumulatorNumAccumulated'

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> m' (Tensor Value Int32)

    num_accumulated: The number of gradients aggregated in the given accumulator.

    accumulatorSetGlobalStep

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> Tensor v'2 Int64

    new_global_step: The new global_step value to set.

    -> m' ControlNode 

    Updates the accumulator with a new value for global_step. Logs warning if the

    accumulator's value is already higher than new_global_step.

    accumulatorSetGlobalStep'

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> Tensor v'2 Int64

    new_global_step: The new global_step value to set.

    -> m' ControlNode 

    accumulatorTakeGradient

    Arguments

    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
    => Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> Tensor v'2 Int32

    num_required: Number of gradients required before we return an aggregate.

    -> m' (Tensor Value dtype)

    average: The average of the accumulated gradients.

    Extracts the average gradient in the given ConditionalAccumulator, provided

    that sufficient (i.e., more than num_required) gradients have been accumulated. - The op blocks until sufficient gradients have been accumulated. - If the accumulator has already aggregated more than num_required gradients, it - returns the average of the accumulated gradients. - Also automatically increments the recorded global_step in the accumulator by 1, - and resets the aggregate to 0.

    accumulatorTakeGradient'

    Arguments

    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> Tensor v'2 Int32

    num_required: Number of gradients required before we return an aggregate.

    -> m' (Tensor Value dtype)

    average: The average of the accumulated gradients.

    acos

    Arguments

    :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
    => Tensor v'1 t

    x

    -> Tensor Build t

    y

    Computes acos of x element-wise.

    add

    Arguments

    :: OneOf `[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
    => Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build t

    z

    Returns x + y element-wise.

    • NOTE*: Add supports broadcasting. AddN does not. More about broadcasting - here

    addManySparseToTensorsMap

    Arguments

    :: (MonadBuild m', TensorType t) 
    => Tensor v'1 Int64

    sparse_indices: 2-D. The indices of the minibatch SparseTensor. - `sparse_indices[:, 0]` must be ordered values in `[0, N)`.

    -> Tensor v'2 t

    sparse_values: 1-D. The values of the minibatch SparseTensor.

    -> Tensor v'3 Int64

    sparse_shape: 1-D. The shape of the minibatch SparseTensor. - The minibatch size `N == sparse_shape[0]`.

    -> m' (Tensor Value Int64)

    sparse_handles: 1-D. The handles of the SparseTensor now stored in the + an output element, this operation computes \(y = |x|\).

    abs' Source #

    Arguments

    :: OneOf '[Int32, Int64, Word16, Double, Float] t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor Build t

    y

    accumulatorApplyGradient Source #

    Arguments

    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) 
    => Tensor Ref ByteString

    handle: The handle to a accumulator.

    -> Tensor v'2 Int64

    local_step: The local_step value at which the gradient was computed.

    -> Tensor v'3 dtype

    gradient: A tensor of the gradient to be accumulated.

    -> m' ControlNode 

    Applies a gradient to a given accumulator.

    Does not add if local_step is lesser than the accumulator's global_step.

    accumulatorApplyGradient' Source #

    Arguments

    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to a accumulator.

    -> Tensor v'2 Int64

    local_step: The local_step value at which the gradient was computed.

    -> Tensor v'3 dtype

    gradient: A tensor of the gradient to be accumulated.

    -> m' ControlNode 

    accumulatorNumAccumulated Source #

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> m' (Tensor Value Int32)

    num_accumulated: The number of gradients aggregated in the given accumulator.

    Returns the number of gradients aggregated in the given accumulators.

    accumulatorNumAccumulated' Source #

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> m' (Tensor Value Int32)

    num_accumulated: The number of gradients aggregated in the given accumulator.

    accumulatorSetGlobalStep Source #

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> Tensor v'2 Int64

    new_global_step: The new global_step value to set.

    -> m' ControlNode 

    Updates the accumulator with a new value for global_step.

    Logs warning if the accumulator's value is already higher than + new_global_step.

    accumulatorSetGlobalStep' Source #

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> Tensor v'2 Int64

    new_global_step: The new global_step value to set.

    -> m' ControlNode 

    accumulatorTakeGradient Source #

    Arguments

    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) 
    => Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> Tensor v'2 Int32

    num_required: Number of gradients required before we return an aggregate.

    -> m' (Tensor Value dtype)

    average: The average of the accumulated gradients.

    Extracts the average gradient in the given ConditionalAccumulator.

    The op blocks until sufficient (i.e., more than num_required) + gradients have been accumulated. If the accumulator has already + aggregated more than num_required gradients, it returns the average of + the accumulated gradients. Also automatically increments the recorded + global_step in the accumulator by 1, and resets the aggregate to 0.

    accumulatorTakeGradient' Source #

    Arguments

    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to an accumulator.

    -> Tensor v'2 Int32

    num_required: Number of gradients required before we return an aggregate.

    -> m' (Tensor Value dtype)

    average: The average of the accumulated gradients.

    acos Source #

    Arguments

    :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
    => Tensor v'1 t

    x

    -> Tensor Build t

    y

    Computes acos of x element-wise.

    acosh Source #

    Arguments

    :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
    => Tensor v'1 t

    x

    -> Tensor Build t

    y

    Computes inverse hyperbolic cosine of x element-wise.

    add Source #

    Returns x + y element-wise.

    • NOTE*: Add supports broadcasting. AddN does not. More about broadcasting + here

    addManySparseToTensorsMap Source #

    Arguments

    :: (MonadBuild m', TensorType t) 
    => Tensor v'1 Int64

    sparse_indices: 2-D. The indices of the minibatch SparseTensor. + `sparse_indices[:, 0]` must be ordered values in `[0, N)`.

    -> Tensor v'2 t

    sparse_values: 1-D. The values of the minibatch SparseTensor.

    -> Tensor v'3 Int64

    sparse_shape: 1-D. The shape of the minibatch SparseTensor. + The minibatch size `N == sparse_shape[0]`.

    -> m' (Tensor Value Int64)

    sparse_handles: 1-D. The handles of the SparseTensor now stored in the SparseTensorsMap. Shape: `[N]`.

    Add an N-minibatch SparseTensor to a SparseTensorsMap, return N handles.

    A SparseTensor of rank R is represented by three tensors: sparse_indices, sparse_values, and sparse_shape, where

    ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```

    An N-minibatch of SparseTensor objects is represented as a SparseTensor having a first sparse_indices column taking values between `[0, N)`, where @@ -25,10 +26,10 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core container and shared_name are passed to that Op. If no shared_name is provided here, instead use the *name* of the Operation created by calling AddManySparseToTensorsMap as the shared_name passed to - TakeManySparseFromTensorsMap. Ensure the Operations are colocated.

    addManySparseToTensorsMap'

    Arguments

    :: (MonadBuild m', TensorType t) 
    => OpParams 
    -> Tensor v'1 Int64

    sparse_indices: 2-D. The indices of the minibatch SparseTensor. - `sparse_indices[:, 0]` must be ordered values in `[0, N)`.

    -> Tensor v'2 t

    sparse_values: 1-D. The values of the minibatch SparseTensor.

    -> Tensor v'3 Int64

    sparse_shape: 1-D. The shape of the minibatch SparseTensor. - The minibatch size `N == sparse_shape[0]`.

    -> m' (Tensor Value Int64)

    sparse_handles: 1-D. The handles of the SparseTensor now stored in the - SparseTensorsMap. Shape: `[N]`.

    addN

    Arguments

    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
    => [Tensor v'1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Build t

    sum

    Add all input tensors element wise.

    addN'

    Arguments

    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
    => OpParams 
    -> [Tensor v'1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Build t

    sum

    addSparseToTensorsMap

    Arguments

    :: (MonadBuild m', TensorType t) 
    => Tensor v'1 Int64

    sparse_indices: 2-D. The indices of the SparseTensor.

    -> Tensor v'2 t

    sparse_values: 1-D. The values of the SparseTensor.

    -> Tensor v'3 Int64

    sparse_shape: 1-D. The shape of the SparseTensor.

    -> m' (Tensor Value Int64)

    sparse_handle: 0-D. The handle of the SparseTensor now stored in the + TakeManySparseFromTensorsMap. Ensure the Operations are colocated.

    addManySparseToTensorsMap' Source #

    Arguments

    :: (MonadBuild m', TensorType t) 
    => OpParams 
    -> Tensor v'1 Int64

    sparse_indices: 2-D. The indices of the minibatch SparseTensor. + `sparse_indices[:, 0]` must be ordered values in `[0, N)`.

    -> Tensor v'2 t

    sparse_values: 1-D. The values of the minibatch SparseTensor.

    -> Tensor v'3 Int64

    sparse_shape: 1-D. The shape of the minibatch SparseTensor. + The minibatch size `N == sparse_shape[0]`.

    -> m' (Tensor Value Int64)

    sparse_handles: 1-D. The handles of the SparseTensor now stored in the + SparseTensorsMap. Shape: `[N]`.

    addN Source #

    Arguments

    :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
    => [Tensor v'1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Build t

    sum

    Add all input tensors element wise.

    addN' Source #

    Arguments

    :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
    => OpParams 
    -> [Tensor v'1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Build t

    sum

    addSparseToTensorsMap Source #

    Arguments

    :: (MonadBuild m', TensorType t) 
    => Tensor v'1 Int64

    sparse_indices: 2-D. The indices of the SparseTensor.

    -> Tensor v'2 t

    sparse_values: 1-D. The values of the SparseTensor.

    -> Tensor v'3 Int64

    sparse_shape: 1-D. The shape of the SparseTensor.

    -> m' (Tensor Value Int64)

    sparse_handle: 0-D. The handle of the SparseTensor now stored in the SparseTensorsMap.

    Add a SparseTensor to a SparseTensorsMap return its handle.

    A SparseTensor is represented by three tensors: sparse_indices, sparse_values, and sparse_shape.

    This operator takes the given SparseTensor and adds it to a container object (a SparseTensorsMap). A unique key within this container is generated @@ -38,24 +39,24 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core container and shared_name are passed to that Op. If no shared_name is provided here, instead use the *name* of the Operation created by calling AddSparseToTensorsMap as the shared_name passed to - TakeManySparseFromTensorsMap. Ensure the Operations are colocated.

    addSparseToTensorsMap'

    Arguments

    :: (MonadBuild m', TensorType t) 
    => OpParams 
    -> Tensor v'1 Int64

    sparse_indices: 2-D. The indices of the SparseTensor.

    -> Tensor v'2 t

    sparse_values: 1-D. The values of the SparseTensor.

    -> Tensor v'3 Int64

    sparse_shape: 1-D. The shape of the SparseTensor.

    -> m' (Tensor Value Int64)

    sparse_handle: 0-D. The handle of the SparseTensor now stored in the - SparseTensorsMap.

    adjustContrast

    Arguments

    :: OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t 
    => Tensor v'1 t

    images

    -> Tensor v'2 Float

    contrast_factor

    -> Tensor v'3 Float

    min_value

    -> Tensor v'4 Float

    max_value

    -> Tensor Build Float

    output

    Deprecated. Disallowed in GraphDef version >= 2.

    adjustContrast'

    Arguments

    :: OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t 
    => OpParams 
    -> Tensor v'1 t

    images

    -> Tensor v'2 Float

    contrast_factor

    -> Tensor v'3 Float

    min_value

    -> Tensor v'4 Float

    max_value

    -> Tensor Build Float

    output

    adjustContrastv2

    Arguments

    :: Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    contrast_factor: A float multiplier for adjusting contrast.

    -> Tensor Build Float

    output: The contrast-adjusted image or images.

    Adjust the contrast of one or more images.

    images is a tensor of at least 3 dimensions. The last 3 dimensions are + TakeManySparseFromTensorsMap. Ensure the Operations are colocated.

    addSparseToTensorsMap' Source #

    Arguments

    :: (MonadBuild m', TensorType t) 
    => OpParams 
    -> Tensor v'1 Int64

    sparse_indices: 2-D. The indices of the SparseTensor.

    -> Tensor v'2 t

    sparse_values: 1-D. The values of the SparseTensor.

    -> Tensor v'3 Int64

    sparse_shape: 1-D. The shape of the SparseTensor.

    -> m' (Tensor Value Int64)

    sparse_handle: 0-D. The handle of the SparseTensor now stored in the + SparseTensorsMap.

    adjustContrast Source #

    Arguments

    :: OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t 
    => Tensor v'1 t

    images

    -> Tensor v'2 Float

    contrast_factor

    -> Tensor v'3 Float

    min_value

    -> Tensor v'4 Float

    max_value

    -> Tensor Build Float

    output

    Deprecated. Disallowed in GraphDef version >= 2.

    adjustContrast' Source #

    Arguments

    :: OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t 
    => OpParams 
    -> Tensor v'1 t

    images

    -> Tensor v'2 Float

    contrast_factor

    -> Tensor v'3 Float

    min_value

    -> Tensor v'4 Float

    max_value

    -> Tensor Build Float

    output

    adjustContrastv2 Source #

    Arguments

    :: Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    contrast_factor: A float multiplier for adjusting contrast.

    -> Tensor Build Float

    output: The contrast-adjusted image or images.

    Adjust the contrast of one or more images.

    images is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels].`

    Contrast is adjusted independently for each channel of each image.

    For each channel, the Op first computes the mean of the image pixels in the channel and then adjusts each component of each pixel to - `(x - mean) * contrast_factor + mean`.

    adjustContrastv2'

    Arguments

    :: OpParams 
    -> Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    contrast_factor: A float multiplier for adjusting contrast.

    -> Tensor Build Float

    output: The contrast-adjusted image or images.

    adjustHue

    Arguments

    :: Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    delta: A float delta to add to the hue.

    -> Tensor Build Float

    output: The hue-adjusted image or images.

    Adjust the hue of one or more images.

    images is a tensor of at least 3 dimensions. The last dimension is + `(x - mean) * contrast_factor + mean`.

    adjustContrastv2' Source #

    Arguments

    :: OpParams 
    -> Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    contrast_factor: A float multiplier for adjusting contrast.

    -> Tensor Build Float

    output: The contrast-adjusted image or images.

    adjustHue Source #

    Arguments

    :: Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    delta: A float delta to add to the hue.

    -> Tensor Build Float

    output: The hue-adjusted image or images.

    Adjust the hue of one or more images.

    images is a tensor of at least 3 dimensions. The last dimension is interpretted as channels, and must be three.

    The input image is considered in the RGB colorspace. Conceptually, the RGB colors are first mapped into HSV. A delta is then applied all the hue values, - and then remapped back to RGB colorspace.

    adjustHue'

    Arguments

    :: OpParams 
    -> Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    delta: A float delta to add to the hue.

    -> Tensor Build Float

    output: The hue-adjusted image or images.

    adjustSaturation

    Arguments

    :: Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    scale: A float scale to add to the saturation.

    -> Tensor Build Float

    output: The hue-adjusted image or images.

    Adjust the saturation of one or more images.

    images is a tensor of at least 3 dimensions. The last dimension is + and then remapped back to RGB colorspace.

    adjustHue' Source #

    Arguments

    :: OpParams 
    -> Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    delta: A float delta to add to the hue.

    -> Tensor Build Float

    output: The hue-adjusted image or images.

    adjustSaturation Source #

    Arguments

    :: Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    scale: A float scale to add to the saturation.

    -> Tensor Build Float

    output: The hue-adjusted image or images.

    Adjust the saturation of one or more images.

    images is a tensor of at least 3 dimensions. The last dimension is interpretted as channels, and must be three.

    The input image is considered in the RGB colorspace. Conceptually, the RGB colors are first mapped into HSV. A scale is then applied all the saturation - values, and then remapped back to RGB colorspace.

    adjustSaturation'

    Arguments

    :: OpParams 
    -> Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    scale: A float scale to add to the saturation.

    -> Tensor Build Float

    output: The hue-adjusted image or images.

    all

    Arguments

    :: OneOf `[Int32, Int64]` tidx 
    => Tensor v'1 Bool

    input: The tensor to reduce.

    -> Tensor v'2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Build Bool

    output: The reduced tensor.

    Computes the "logical and" of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless + values, and then remapped back to RGB colorspace.

    adjustSaturation' Source #

    Arguments

    :: OpParams 
    -> Tensor v'1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v'2 Float

    scale: A float scale to add to the saturation.

    -> Tensor Build Float

    output: The hue-adjusted image or images.

    all Source #

    Arguments

    :: OneOf '[Int32, Int64] tidx 
    => Tensor v'1 Bool

    input: The tensor to reduce.

    -> Tensor v'2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Build Bool

    output: The reduced tensor.

    Computes the "logical and" of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

    all'

    Arguments

    :: OneOf `[Int32, Int64]` tidx 
    => OpParams 
    -> Tensor v'1 Bool

    input: The tensor to reduce.

    -> Tensor v'2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Build Bool

    output: The reduced tensor.

    allCandidateSampler

    Arguments

    :: Int64

    num_sampled: Number of candidates to produce per batch.

    -> Int64

    num_true: Number of true labels per context.

    -> Bool

    unique: If unique is true, we sample with rejection, so that all sampled + retained with length 1.

    all' Source #

    Arguments

    :: OneOf '[Int32, Int64] tidx 
    => OpParams 
    -> Tensor v'1 Bool

    input: The tensor to reduce.

    -> Tensor v'2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Build Bool

    output: The reduced tensor.

    allCandidateSampler Source #

    Arguments

    :: MonadBuild m' 
    => Int64

    num_sampled: Number of candidates to produce.

    -> Int64

    num_true: Number of true labels per context.

    -> Bool

    unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

    -> Tensor v'1 Int64

    true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

    -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

    (sampled_candidates, true_expected_count, sampled_expected_count)

    • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

    -> Tensor v'1 Int64

    true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

    -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

    (sampled_candidates, true_expected_count, sampled_expected_count)

    audioSummary' Source #

    Arguments

    :: OpParams 
    -> Float

    sample_rate: The sample rate of the signal in hertz.

    -> Tensor v'1 ByteString

    tag: Scalar. Used to build the tag attribute of the summary values.

    -> Tensor v'2 Float

    tensor: 2-D of shape `[batch_size, frames]`.

    -> Tensor Build ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    audioSummaryV2 Source #

    Arguments

    :: Tensor v'1 ByteString

    tag: Scalar. Used to build the tag attribute of the summary values.

    -> Tensor v'2 Float

    tensor: 2-D of shape `[batch_size, frames]`.

    -> Tensor v'3 Float

    sample_rate: The sample rate of the signal in hertz.

    -> Tensor Build ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    Outputs a Summary protocol buffer with audio.

    The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are - assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate.

    The tag argument is a scalar Tensor of type string. It is used to + assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate.

    The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

    • If max_outputs is 1, the summary value tag is '*tag*/audio'.
    • If max_outputs is greater than 1, the summary value tags are - generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

    audioSummaryV2'

    Arguments

    :: OpParams 
    -> Tensor v'1 ByteString

    tag: Scalar. Used to build the tag attribute of the summary values.

    -> Tensor v'2 Float

    tensor: 2-D of shape `[batch_size, frames]`.

    -> Tensor v'3 Float

    sample_rate: The sample rate of the signal in hertz.

    -> Tensor Build ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    avgPool

    Arguments

    :: OneOf `[Word16, Double, Float]` t 
    => Tensor v'1 t

    value: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor Build t

    output: The average pooled output tensor.

    Performs average pooling on the input.

    Each entry in output is the mean of the corresponding size ksize - window in value.

    avgPool'

    Arguments

    :: OneOf `[Word16, Double, Float]` t 
    => OpParams 
    -> Tensor v'1 t

    value: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor Build t

    output: The average pooled output tensor.

    avgPool3D

    Arguments

    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
    => Tensor v'1 t

    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

    -> Tensor Build t

    output: The average pooled output tensor.

    Performs 3D average pooling on the input.

    avgPool3D'

    Arguments

    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
    => OpParams 
    -> Tensor v'1 t

    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

    -> Tensor Build t

    output: The average pooled output tensor.

    avgPool3DGrad

    Arguments

    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
    => Tensor v'1 Int32

    orig_input_shape: The original input dimensions.

    -> Tensor v'2 t

    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

    -> Tensor Build t

    output: The backprop for input.

    Computes gradients of average pooling function.

    avgPool3DGrad'

    Arguments

    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
    => OpParams 
    -> Tensor v'1 Int32

    orig_input_shape: The original input dimensions.

    -> Tensor v'2 t

    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

    -> Tensor Build t

    output: The backprop for input.

    avgPoolGrad

    Arguments

    :: OneOf `[Word16, Double, Float]` t 
    => Tensor v'1 Int32

    orig_input_shape: 1-D. Shape of the original input to avg_pool.

    -> Tensor v'2 t

    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. - the output of avg_pool.

    -> Tensor Build t

    output: 4-D. Gradients w.r.t. the input of avg_pool.

    Computes gradients of the average pooling function.

    avgPoolGrad'

    Arguments

    :: OneOf `[Word16, Double, Float]` t 
    => OpParams 
    -> Tensor v'1 Int32

    orig_input_shape: 1-D. Shape of the original input to avg_pool.

    -> Tensor v'2 t

    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. - the output of avg_pool.

    -> Tensor Build t

    output: 4-D. Gradients w.r.t. the input of avg_pool.

    barrier

    Arguments

    :: MonadBuild m' 
    => [DataType]

    component_types: The type of each component in a value.

    -> m' (Tensor Ref ByteString)

    handle: The handle to the barrier.

    Defines a barrier that persists across different graph executions.

    A barrier represents a key-value map, where each key is a string, and + generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

    audioSummaryV2' Source #

    Arguments

    :: OpParams 
    -> Tensor v'1 ByteString

    tag: Scalar. Used to build the tag attribute of the summary values.

    -> Tensor v'2 Float

    tensor: 2-D of shape `[batch_size, frames]`.

    -> Tensor v'3 Float

    sample_rate: The sample rate of the signal in hertz.

    -> Tensor Build ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    avgPool Source #

    Arguments

    :: OneOf '[Word16, Double, Float] t 
    => Tensor v'1 t

    value: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor Build t

    output: The average pooled output tensor.

    Performs average pooling on the input.

    Each entry in output is the mean of the corresponding size ksize + window in value.

    avgPool' Source #

    Arguments

    :: OneOf '[Word16, Double, Float] t 
    => OpParams 
    -> Tensor v'1 t

    value: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor Build t

    output: The average pooled output tensor.

    avgPool3D Source #

    Arguments

    :: OneOf '[Double, Float] t 
    => Tensor v'1 t

    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

    -> Tensor Build t

    output: The average pooled output tensor.

    Performs 3D average pooling on the input.

    avgPool3D' Source #

    Arguments

    :: OneOf '[Double, Float] t 
    => OpParams 
    -> Tensor v'1 t

    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

    -> Tensor Build t

    output: The average pooled output tensor.

    avgPool3DGrad Source #

    Arguments

    :: OneOf '[Double, Float] t 
    => Tensor v'1 Int32

    orig_input_shape: The original input dimensions.

    -> Tensor v'2 t

    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

    -> Tensor Build t

    output: The backprop for input.

    Computes gradients of average pooling function.

    avgPool3DGrad' Source #

    Arguments

    :: OneOf '[Double, Float] t 
    => OpParams 
    -> Tensor v'1 Int32

    orig_input_shape: The original input dimensions.

    -> Tensor v'2 t

    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

    -> Tensor Build t

    output: The backprop for input.

    avgPoolGrad Source #

    Arguments

    :: OneOf '[Word16, Double, Float] t 
    => Tensor v'1 Int32

    orig_input_shape: 1-D. Shape of the original input to avg_pool.

    -> Tensor v'2 t

    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. + the output of avg_pool.

    -> Tensor Build t

    output: 4-D. Gradients w.r.t. the input of avg_pool.

    Computes gradients of the average pooling function.

    avgPoolGrad' Source #

    Arguments

    :: OneOf '[Word16, Double, Float] t 
    => OpParams 
    -> Tensor v'1 Int32

    orig_input_shape: 1-D. Shape of the original input to avg_pool.

    -> Tensor v'2 t

    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. + the output of avg_pool.

    -> Tensor Build t

    output: 4-D. Gradients w.r.t. the input of avg_pool.

    barrier Source #

    Arguments

    :: MonadBuild m' 
    => [DataType]

    component_types: The type of each component in a value.

    -> m' (Tensor Ref ByteString)

    handle: The handle to the barrier.

    Defines a barrier that persists across different graph executions.

    A barrier represents a key-value map, where each key is a string, and each value is a tuple of tensors.

    At runtime, the barrier contains complete and incomplete elements. A complete element has defined tensors for all components of its value tuple, and may be accessed using BarrierTakeMany. An incomplete element has some undefined components in its value tuple, - and may be updated using BarrierInsertMany.

    barrier'

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> [DataType]

    component_types: The type of each component in a value.

    -> m' (Tensor Ref ByteString)

    handle: The handle to the barrier.

    barrierClose

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' ControlNode 

    Closes the given barrier.

    This operation signals that no more new elements will be inserted in the + and may be updated using BarrierInsertMany.

    barrier' Source #

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> [DataType]

    component_types: The type of each component in a value.

    -> m' (Tensor Ref ByteString)

    handle: The handle to the barrier.

    barrierClose Source #

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' ControlNode 

    Closes the given barrier.

    This operation signals that no more new elements will be inserted in the given barrier. Subsequent InsertMany that try to introduce a new key will fail. Subsequent InsertMany operations that just add missing components to already existing elements will continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient completed elements remain in the barrier. - Subsequent TakeMany operations that would block will fail immediately.

    barrierClose'

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' ControlNode 

    barrierIncompleteSize

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' (Tensor Value Int32)

    size: The number of incomplete elements (i.e. those with some of their value - components not set) in the barrier.

    Computes the number of incomplete elements in the given barrier.

    barrierIncompleteSize'

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' (Tensor Value Int32)

    size: The number of incomplete elements (i.e. those with some of their value - components not set) in the barrier.

    barrierInsertMany

    Arguments

    :: (MonadBuild m', TensorType t) 
    => Int64

    component_index: The component of the barrier elements that is being assigned.

    -> Tensor Ref ByteString

    handle: The handle to a barrier.

    -> Tensor v'2 ByteString

    keys: A one-dimensional tensor of keys, with length n.

    -> Tensor v'3 t

    values: An any-dimensional tensor of values, which are associated with the - respective keys. The 0th dimension must have length n.

    -> m' ControlNode 

    For each key, assigns the respective value to the specified component.

    If a key is not found in the barrier, this operation will create a new + Subsequent TakeMany operations that would block will fail immediately.

    barrierClose' Source #

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' ControlNode 

    barrierIncompleteSize Source #

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' (Tensor Value Int32)

    size: The number of incomplete elements (i.e. those with some of their value + components not set) in the barrier.

    Computes the number of incomplete elements in the given barrier.

    barrierIncompleteSize' Source #

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' (Tensor Value Int32)

    size: The number of incomplete elements (i.e. those with some of their value + components not set) in the barrier.

    barrierInsertMany Source #

    Arguments

    :: (MonadBuild m', TensorType t) 
    => Int64

    component_index: The component of the barrier elements that is being assigned.

    -> Tensor Ref ByteString

    handle: The handle to a barrier.

    -> Tensor v'2 ByteString

    keys: A one-dimensional tensor of keys, with length n.

    -> Tensor v'3 t

    values: An any-dimensional tensor of values, which are associated with the + respective keys. The 0th dimension must have length n.

    -> m' ControlNode 

    For each key, assigns the respective value to the specified component.

    If a key is not found in the barrier, this operation will create a new incomplete element. If a key is found in the barrier, and the element already has a value at component_index, this operation will fail with - INVALID_ARGUMENT, and leave the barrier in an undefined state.

    barrierInsertMany'

    Arguments

    :: (MonadBuild m', TensorType t) 
    => OpParams 
    -> Int64

    component_index: The component of the barrier elements that is being assigned.

    -> Tensor Ref ByteString

    handle: The handle to a barrier.

    -> Tensor v'2 ByteString

    keys: A one-dimensional tensor of keys, with length n.

    -> Tensor v'3 t

    values: An any-dimensional tensor of values, which are associated with the - respective keys. The 0th dimension must have length n.

    -> m' ControlNode 

    barrierReadySize

    Arguments

    :: MonadBuild m' 
    => Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' (Tensor Value Int32)

    size: The number of complete elements (i.e. those with all of their value - components set) in the barrier.

    Computes the number of complete elements in the given barrier.

    barrierReadySize'

    Arguments

    :: MonadBuild m' 
    => OpParams 
    -> Tensor Ref ByteString

    handle: The handle to a barrier.

    -> m' (Tensor Value Int32)

    size: The number of complete elements (i.e. those with all of their value - components set) in the barrier.

    barrierTakeMany

    Arguments

    :: (MonadBuild m', TensorTypes component_types) 
    => Tensor Ref ByteString

    handle: The handle to a barrier.

    -> Tensor v'2 Int32

    num_elements: A single-element tensor containing the number of elements to - take.

    -> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types)

    (indices, keys, values)

    • indices: A one-dimensional tensor of indices, with length num_elems. + INVALID_ARGUMENT, and leave the barrier in an undefined state.

      barrierInsertMany' Source #

      Arguments

      :: (MonadBuild m', TensorType t) 
      => OpParams 
      -> Int64

      component_index: The component of the barrier elements that is being assigned.

      -> Tensor Ref ByteString

      handle: The handle to a barrier.

      -> Tensor v'2 ByteString

      keys: A one-dimensional tensor of keys, with length n.

      -> Tensor v'3 t

      values: An any-dimensional tensor of values, which are associated with the + respective keys. The 0th dimension must have length n.

      -> m' ControlNode 

      barrierReadySize Source #

      Arguments

      :: MonadBuild m' 
      => Tensor Ref ByteString

      handle: The handle to a barrier.

      -> m' (Tensor Value Int32)

      size: The number of complete elements (i.e. those with all of their value + components set) in the barrier.

      Computes the number of complete elements in the given barrier.

      barrierReadySize' Source #

      Arguments

      :: MonadBuild m' 
      => OpParams 
      -> Tensor Ref ByteString

      handle: The handle to a barrier.

      -> m' (Tensor Value Int32)

      size: The number of complete elements (i.e. those with all of their value + components set) in the barrier.

      barrierTakeMany Source #

      Arguments

      :: (MonadBuild m', TensorTypes component_types) 
      => Tensor Ref ByteString

      handle: The handle to a barrier.

      -> Tensor v'2 Int32

      num_elements: A single-element tensor containing the number of elements to + take.

      -> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types)

      (indices, keys, values)

      • indices: A one-dimensional tensor of indices, with length num_elems. These indices refer to the batch in which the values were placed into the barrier (starting with MIN_LONG and increasing with each BarrierInsertMany).
      • keys: A one-dimensional tensor of keys, with length num_elements.
      • values: One any-dimensional tensor per component in a barrier element. All values have length num_elements in the 0th dimension.

      Takes the given number of completed elements from a barrier.

      This operation concatenates completed-element component tensors along the 0th dimension to make a single component tensor.

      Elements come out of the barrier when they are complete, and in the order in which they were placed into the barrier. The indices output provides information about the batch in which each element was originally inserted - into the barrier.

      barrierTakeMany'

      Arguments

      :: (MonadBuild m', TensorTypes component_types) 
      => OpParams 
      -> Tensor Ref ByteString

      handle: The handle to a barrier.

      -> Tensor v'2 Int32

      num_elements: A single-element tensor containing the number of elements to - take.

      -> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types)

      (indices, keys, values)

      batchCholesky Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchCholesky' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchCholeskyGrad Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      l

      -> Tensor v'2 t

      grad

      -> Tensor Build t

      output

      batchCholeskyGrad' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      l

      -> Tensor v'2 t

      grad

      -> Tensor Build t

      output

      batchDataset Source #

      Arguments

      :: MonadBuild m' 
      => [DataType]

      output_types

      -> Tensor v'1 ResourceHandle

      input_dataset

      -> Tensor v'2 Int64

      batch_size: A scalar representing the number of elements to accumulate in a + batch.

      -> m' (Tensor Value ResourceHandle)

      handle

      Creates a dataset that batches batch_size elements from input_dataset.

      batchDataset' Source #

      Arguments

      :: MonadBuild m' 
      => OpParams 
      -> [DataType]

      output_types

      -> Tensor v'1 ResourceHandle

      input_dataset

      -> Tensor v'2 Int64

      batch_size: A scalar representing the number of elements to accumulate in a + batch.

      -> m' (Tensor Value ResourceHandle)

      handle

      batchFFT Source #

      Arguments

      :: Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchFFT' Source #

      Arguments

      :: OpParams 
      -> Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchFFT2D Source #

      Arguments

      :: Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchFFT2D' Source #

      Arguments

      :: OpParams 
      -> Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchFFT3D Source #

      Arguments

      :: Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchFFT3D' Source #

      Arguments

      :: OpParams 
      -> Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchIFFT Source #

      Arguments

      :: Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchIFFT' Source #

      Arguments

      :: OpParams 
      -> Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchIFFT2D Source #

      Arguments

      :: Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchIFFT2D' Source #

      Arguments

      :: OpParams 
      -> Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchIFFT3D Source #

      Arguments

      :: Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchIFFT3D' Source #

      Arguments

      :: OpParams 
      -> Tensor v'1 (Complex Float)

      input

      -> Tensor Build (Complex Float)

      output

      batchMatMul Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t 
      => Tensor v'1 t

      x: 2-D or higher with shape `[..., r_x, c_x]`.

      -> Tensor v'2 t

      y: 2-D or higher with shape `[..., r_y, c_y]`.

      -> Tensor Build t

      output: 3-D or higher with shape `[..., r_o, c_o]`

      Multiplies slices of two tensors in batches.

      Multiplies all slices of Tensor x and y (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting - the adj_x or adj_y flag to True, which are by default False.

      The input tensors x and y are 3-D or higher with shape `[..., r_x, c_x]` - and `[..., r_y, c_y]`.

      The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where:

      r_o = c_x if adj_x else r_x - c_o = r_y if adj_y else c_y

      It is computed as:

      output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])

      batchMatMul'

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      x: 3-D or higher with shape `[..., r_x, c_x]`.

      -> Tensor v'2 t

      y: 3-D or higher with shape `[..., r_y, c_y]`.

      -> Tensor Build t

      output: 3-D or higher with shape `[..., r_o, c_o]`

      batchMatrixBandPart

      Arguments

      :: TensorType t 
      => Tensor v'1 t

      input

      -> Tensor v'2 Int64

      num_lower

      -> Tensor v'3 Int64

      num_upper

      -> Tensor Build t

      band

      batchMatrixBandPart'

      Arguments

      :: TensorType t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor v'2 Int64

      num_lower

      -> Tensor v'3 Int64

      num_upper

      -> Tensor Build t

      band

      batchMatrixDeterminant

      Arguments

      :: OneOf `[Double, Float]` t 
      => Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchMatrixDeterminant'

      Arguments

      :: OneOf `[Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchMatrixDiag

      Arguments

      :: TensorType t 
      => Tensor v'1 t

      diagonal

      -> Tensor Build t

      output

      batchMatrixDiag'

      Arguments

      :: TensorType t 
      => OpParams 
      -> Tensor v'1 t

      diagonal

      -> Tensor Build t

      output

      batchMatrixDiagPart

      Arguments

      :: TensorType t 
      => Tensor v'1 t

      input

      -> Tensor Build t

      diagonal

      batchMatrixDiagPart'

      Arguments

      :: TensorType t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor Build t

      diagonal

      batchMatrixInverse

      Arguments

      :: OneOf `[Double, Float]` t 
      => Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchMatrixInverse'

      Arguments

      :: OneOf `[Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchMatrixSetDiag

      Arguments

      :: TensorType t 
      => Tensor v'1 t

      input

      -> Tensor v'2 t

      diagonal

      -> Tensor Build t

      output

      batchMatrixSetDiag'

      Arguments

      :: TensorType t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor v'2 t

      diagonal

      -> Tensor Build t

      output

      batchMatrixSolve

      Arguments

      :: OneOf `[Double, Float]` t 
      => Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor Build t

      output

      batchMatrixSolve'

      Arguments

      :: OneOf `[Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor Build t

      output

      batchMatrixSolveLs

      Arguments

      :: OneOf `[Double, Float]` t 
      => Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor v'3 Double

      l2_regularizer

      -> Tensor Build t

      output

      batchMatrixSolveLs'

      Arguments

      :: OneOf `[Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor v'3 Double

      l2_regularizer

      -> Tensor Build t

      output

      batchMatrixTriangularSolve

      Arguments

      :: OneOf `[Double, Float]` t 
      => Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor Build t

      output

      batchMatrixTriangularSolve'

      Arguments

      :: OneOf `[Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor Build t

      output

      batchNormWithGlobalNormalization

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => Bool

      scale_after_normalization: A bool indicating whether the resulted tensor - needs to be multiplied with gamma.

      -> Float

      variance_epsilon: A small float number to avoid dividing by 0.

      -> Tensor v'1 t

      t: A 4D input Tensor.

      -> Tensor v'2 t

      m: A 1D mean Tensor with size matching the last dimension of t. + the adj_x or adj_y flag to True, which are by default False.

      The input tensors x and y are 2-D or higher with shape `[..., r_x, c_x]` + and `[..., r_y, c_y]`.

      The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

      r_o = c_x if adj_x else r_x + c_o = r_y if adj_y else c_y

      It is computed as:

      output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])

      batchMatMul' Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      x: 2-D or higher with shape `[..., r_x, c_x]`.

      -> Tensor v'2 t

      y: 2-D or higher with shape `[..., r_y, c_y]`.

      -> Tensor Build t

      output: 3-D or higher with shape `[..., r_o, c_o]`

      batchMatrixBandPart Source #

      Arguments

      :: TensorType t 
      => Tensor v'1 t

      input

      -> Tensor v'2 Int64

      num_lower

      -> Tensor v'3 Int64

      num_upper

      -> Tensor Build t

      band

      batchMatrixBandPart' Source #

      Arguments

      :: TensorType t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor v'2 Int64

      num_lower

      -> Tensor v'3 Int64

      num_upper

      -> Tensor Build t

      band

      batchMatrixDiag Source #

      Arguments

      :: TensorType t 
      => Tensor v'1 t

      diagonal

      -> Tensor Build t

      output

      batchMatrixDiag' Source #

      Arguments

      :: TensorType t 
      => OpParams 
      -> Tensor v'1 t

      diagonal

      -> Tensor Build t

      output

      batchMatrixDiagPart Source #

      Arguments

      :: TensorType t 
      => Tensor v'1 t

      input

      -> Tensor Build t

      diagonal

      batchMatrixDiagPart' Source #

      Arguments

      :: TensorType t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor Build t

      diagonal

      batchMatrixInverse Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchMatrixInverse' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchMatrixSetDiag Source #

      Arguments

      :: TensorType t 
      => Tensor v'1 t

      input

      -> Tensor v'2 t

      diagonal

      -> Tensor Build t

      output

      batchMatrixSetDiag' Source #

      Arguments

      :: TensorType t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor v'2 t

      diagonal

      -> Tensor Build t

      output

      batchMatrixSolve Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor Build t

      output

      batchMatrixSolve' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor Build t

      output

      batchMatrixSolveLs Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor v'3 Double

      l2_regularizer

      -> Tensor Build t

      output

      batchMatrixSolveLs' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor v'3 Double

      l2_regularizer

      -> Tensor Build t

      output

      batchMatrixTriangularSolve Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor Build t

      output

      batchMatrixTriangularSolve' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      matrix

      -> Tensor v'2 t

      rhs

      -> Tensor Build t

      output

      batchNormWithGlobalNormalization Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => Bool

      scale_after_normalization: A bool indicating whether the resulted tensor + needs to be multiplied with gamma.

      -> Float

      variance_epsilon: A small float number to avoid dividing by 0.

      -> Tensor v'1 t

      t: A 4D input Tensor.

      -> Tensor v'2 t

      m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, - or a saved moving average thereof.

      -> Tensor v'3 t

      v: A 1D variance Tensor with size matching the last dimension of t. + or a saved moving average thereof.

      -> Tensor v'3 t

      v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, - or a saved moving average thereof.

      -> Tensor v'4 t

      beta: A 1D beta Tensor with size matching the last dimension of t. - An offset to be added to the normalized tensor.

      -> Tensor v'5 t

      gamma: A 1D gamma Tensor with size matching the last dimension of t. + or a saved moving average thereof.

      -> Tensor v'4 t

      beta: A 1D beta Tensor with size matching the last dimension of t. + An offset to be added to the normalized tensor.

      -> Tensor v'5 t

      gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied - with the normalized tensor.

      -> Tensor Build t

      result

      Batch normalization.

      This op is deprecated. Prefer `tf.nn.batch_normalization`.

      batchNormWithGlobalNormalization'

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => OpParams 
      -> Bool

      scale_after_normalization: A bool indicating whether the resulted tensor - needs to be multiplied with gamma.

      -> Float

      variance_epsilon: A small float number to avoid dividing by 0.

      -> Tensor v'1 t

      t: A 4D input Tensor.

      -> Tensor v'2 t

      m: A 1D mean Tensor with size matching the last dimension of t. + with the normalized tensor.

      -> Tensor Build t

      result

      Batch normalization.

      This op is deprecated. Prefer `tf.nn.batch_normalization`.

      batchNormWithGlobalNormalization' Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => OpParams 
      -> Bool

      scale_after_normalization: A bool indicating whether the resulted tensor + needs to be multiplied with gamma.

      -> Float

      variance_epsilon: A small float number to avoid dividing by 0.

      -> Tensor v'1 t

      t: A 4D input Tensor.

      -> Tensor v'2 t

      m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, - or a saved moving average thereof.

      -> Tensor v'3 t

      v: A 1D variance Tensor with size matching the last dimension of t. + or a saved moving average thereof.

      -> Tensor v'3 t

      v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, - or a saved moving average thereof.

      -> Tensor v'4 t

      beta: A 1D beta Tensor with size matching the last dimension of t. - An offset to be added to the normalized tensor.

      -> Tensor v'5 t

      gamma: A 1D gamma Tensor with size matching the last dimension of t. + or a saved moving average thereof.

      -> Tensor v'4 t

      beta: A 1D beta Tensor with size matching the last dimension of t. + An offset to be added to the normalized tensor.

      -> Tensor v'5 t

      gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied - with the normalized tensor.

      -> Tensor Build t

      result

      batchNormWithGlobalNormalizationGrad

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => Bool

      scale_after_normalization: A bool indicating whether the resulted tensor - needs to be multiplied with gamma.

      -> Float

      variance_epsilon: A small float number to avoid dividing by 0.

      -> Tensor v'1 t

      t: A 4D input Tensor.

      -> Tensor v'2 t

      m: A 1D mean Tensor with size matching the last dimension of t. + with the normalized tensor.

      -> Tensor Build t

      result

      batchNormWithGlobalNormalizationGrad Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => Bool

      scale_after_normalization: A bool indicating whether the resulted tensor + needs to be multiplied with gamma.

      -> Float

      variance_epsilon: A small float number to avoid dividing by 0.

      -> Tensor v'1 t

      t: A 4D input Tensor.

      -> Tensor v'2 t

      m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, - or a saved moving average thereof.

      -> Tensor v'3 t

      v: A 1D variance Tensor with size matching the last dimension of t. + or a saved moving average thereof.

      -> Tensor v'3 t

      v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, - or a saved moving average thereof.

      -> Tensor v'4 t

      gamma: A 1D gamma Tensor with size matching the last dimension of t. + or a saved moving average thereof.

      -> Tensor v'4 t

      gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied - with the normalized Tensor.

      -> Tensor v'5 t

      backprop: 4D backprop Tensor.

      -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

      (dx, dm, dv, db, dg)

      • dx: 4D backprop tensor for input.
      • dm: 1D backprop tensor for mean.
      • dv: 1D backprop tensor for variance.
      • db: 1D backprop tensor for beta.
      • dg: 1D backprop tensor for gamma.

      Gradients for batch normalization.

      This op is deprecated. See `tf.nn.batch_normalization`.

      batchNormWithGlobalNormalizationGrad'

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => OpParams 
      -> Bool

      scale_after_normalization: A bool indicating whether the resulted tensor - needs to be multiplied with gamma.

      -> Float

      variance_epsilon: A small float number to avoid dividing by 0.

      -> Tensor v'1 t

      t: A 4D input Tensor.

      -> Tensor v'2 t

      m: A 1D mean Tensor with size matching the last dimension of t. + with the normalized Tensor.

      -> Tensor v'5 t

      backprop: 4D backprop Tensor.

      -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

      (dx, dm, dv, db, dg)

      • dx: 4D backprop tensor for input.
      • dm: 1D backprop tensor for mean.
      • dv: 1D backprop tensor for variance.
      • db: 1D backprop tensor for beta.
      • dg: 1D backprop tensor for gamma.

      Gradients for batch normalization.

      This op is deprecated. See `tf.nn.batch_normalization`.

      batchNormWithGlobalNormalizationGrad' Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => OpParams 
      -> Bool

      scale_after_normalization: A bool indicating whether the resulted tensor + needs to be multiplied with gamma.

      -> Float

      variance_epsilon: A small float number to avoid dividing by 0.

      -> Tensor v'1 t

      t: A 4D input Tensor.

      -> Tensor v'2 t

      m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, - or a saved moving average thereof.

      -> Tensor v'3 t

      v: A 1D variance Tensor with size matching the last dimension of t. + or a saved moving average thereof.

      -> Tensor v'3 t

      v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, - or a saved moving average thereof.

      -> Tensor v'4 t

      gamma: A 1D gamma Tensor with size matching the last dimension of t. + or a saved moving average thereof.

      -> Tensor v'4 t

      gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied - with the normalized Tensor.

      -> Tensor v'5 t

      backprop: 4D backprop Tensor.

      -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

      (dx, dm, dv, db, dg)

      • dx: 4D backprop tensor for input.
      • dm: 1D backprop tensor for mean.
      • dv: 1D backprop tensor for variance.
      • db: 1D backprop tensor for beta.
      • dg: 1D backprop tensor for gamma.

      batchSelfAdjointEig

      Arguments

      :: OneOf `[Double, Float]` t 
      => Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchSelfAdjointEig'

      Arguments

      :: OneOf `[Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchSelfAdjointEigV2

      Arguments

      :: OneOf `[Double, Float]` t 
      => Tensor v'1 t

      input

      -> (Tensor Build t, Tensor Build t)

      (e, v)

      • e
      • v

      batchSelfAdjointEigV2'

      Arguments

      :: OneOf `[Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> (Tensor Build t, Tensor Build t)

      (e, v)

      • e
      • v

      batchSvd

      Arguments

      :: OneOf `[Complex Double, Complex Float, Double, Float]` t 
      => Tensor v'1 t

      input

      -> (Tensor Build t, Tensor Build t, Tensor Build t)

      (s, u, v)

      • s
      • u
      • v

      batchSvd'

      Arguments

      :: OneOf `[Complex Double, Complex Float, Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> (Tensor Build t, Tensor Build t, Tensor Build t)

      (s, u, v)

      • s
      • u
      • v

      batchToSpace

      Arguments

      :: (TensorType t, OneOf `[Int32, Int64]` tidx) 
      => Int64

      block_size

      -> Tensor v'1 t

      input: 4-D tensor with shape + with the normalized Tensor.

      -> Tensor v'5 t

      backprop: 4D backprop Tensor.

      -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

      (dx, dm, dv, db, dg)

      • dx: 4D backprop tensor for input.
      • dm: 1D backprop tensor for mean.
      • dv: 1D backprop tensor for variance.
      • db: 1D backprop tensor for beta.
      • dg: 1D backprop tensor for gamma.

      batchSelfAdjointEig Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchSelfAdjointEig' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> Tensor Build t

      output

      batchSelfAdjointEigV2 Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      input

      -> (Tensor Build t, Tensor Build t)

      (e, v)

      • e
      • v

      batchSelfAdjointEigV2' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> (Tensor Build t, Tensor Build t)

      (e, v)

      • e
      • v

      batchSvd Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Double, Float] t 
      => Tensor v'1 t

      input

      -> (Tensor Build t, Tensor Build t, Tensor Build t)

      (s, u, v)

      • s
      • u
      • v

      batchSvd' Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      input

      -> (Tensor Build t, Tensor Build t, Tensor Build t)

      (s, u, v)

      • s
      • u
      • v

      batchToSpace Source #

      Arguments

      :: (TensorType t, OneOf '[Int32, Int64] tidx) 
      => Int64

      block_size

      -> Tensor v'1 t

      input: 4-D tensor with shape `[batch*block_size*block_size, height_padblock_size, width_padblock_size, depth]`. Note that the batch size of the input tensor must be divisible by - `block_size * block_size`.

      -> Tensor v'2 tidx

      crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + `block_size * block_size`.

      -> Tensor v'2 tidx

      crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies how many elements to crop from the intermediate result across the spatial - dimensions as follows:

      crops = [[crop_top, crop_bottom], [crop_left, crop_right]]

      -> Tensor Build t

      output: 4-D with shape `[batch, height, width, depth]`, where:

      height = height_pad - crop_top - crop_bottom - width = width_pad - crop_left - crop_right

      The attr block_size must be greater than one. It indicates the block size.

      Some examples:

      1. For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

      ```prettyprint + dimensions as follows:

      crops = [[crop_top, crop_bottom], [crop_left, crop_right]]

      -> Tensor Build t

      output: 4-D with shape `[batch, height, width, depth]`, where:

      height = height_pad - crop_top - crop_bottom + width = width_pad - crop_left - crop_right

      The attr block_size must be greater than one. It indicates the block size.

      Some examples:

      1. For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

      ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ``` x = [[[[1], [2]], [[3], [4]]]] - ```

      1. For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

      ```prettyprint + ```

      1. For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

      ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] - ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] - ```

      1. For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

      ```prettyprint - x = [[[[1], [3]], [[5], [7]]], + ```

      1. For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

      ``` + x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] - ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ``` x = [[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] - ```

      1. For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

      ```prettyprint + ```

      1. For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

      ``` x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] - ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ``` x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], @@ -241,41 +278,41 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core cropping. This is the reverse transformation of SpaceToBatch. More specifically, this op outputs a copy of the input tensor where values from the batch dimension are moved in spatial blocks to the height and width dimensions, - followed by cropping along the height and width dimensions.

      batchToSpace'

      Arguments

      :: (TensorType t, OneOf `[Int32, Int64]` tidx) 
      => OpParams 
      -> Int64

      block_size

      -> Tensor v'1 t

      input: 4-D tensor with shape + followed by cropping along the height and width dimensions.

      batchToSpace' Source #

      Arguments

      :: (TensorType t, OneOf '[Int32, Int64] tidx) 
      => OpParams 
      -> Int64

      block_size

      -> Tensor v'1 t

      input: 4-D tensor with shape `[batch*block_size*block_size, height_padblock_size, width_padblock_size, depth]`. Note that the batch size of the input tensor must be divisible by - `block_size * block_size`.

      -> Tensor v'2 tidx

      crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + `block_size * block_size`.

      -> Tensor v'2 tidx

      crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies how many elements to crop from the intermediate result across the spatial - dimensions as follows:

      crops = [[crop_top, crop_bottom], [crop_left, crop_right]]

      -> Tensor Build t

      output: 4-D with shape `[batch, height, width, depth]`, where:

      height = height_pad - crop_top - crop_bottom - width = width_pad - crop_left - crop_right

      The attr block_size must be greater than one. It indicates the block size.

      Some examples:

      1. For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

      ```prettyprint + dimensions as follows:

      crops = [[crop_top, crop_bottom], [crop_left, crop_right]]

      -> Tensor Build t

      output: 4-D with shape `[batch, height, width, depth]`, where:

      height = height_pad - crop_top - crop_bottom + width = width_pad - crop_left - crop_right

      The attr block_size must be greater than one. It indicates the block size.

      Some examples:

      1. For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

      ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ``` x = [[[[1], [2]], [[3], [4]]]] - ```

      1. For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

      ```prettyprint + ```

      1. For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

      ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] - ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] - ```

      1. For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

      ```prettyprint - x = [[[[1], [3]], [[5], [7]]], + ```

      1. For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

      ``` + x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] - ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ``` x = [[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] - ```

      1. For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

      ```prettyprint + ```

      1. For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

      ``` x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] - ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ``` x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] - ```

      batchToSpaceND

      Arguments

      :: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tcrops) 
      => Tensor v'1 t

      input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - where spatial_shape has M dimensions.

      -> Tensor v'2 tblock_shape

      block_shape: 1-D with shape `[M]`, all values must be >= 1.

      -> Tensor v'3 tcrops

      crops: 2-D with shape `[M, 2]`, all values must be >= 0. + ```

      batchToSpaceND Source #

      Arguments

      :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) 
      => Tensor v'1 t

      input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + where spatial_shape has M dimensions.

      -> Tensor v'2 tblock_shape

      block_shape: 1-D with shape `[M]`, all values must be >= 1.

      -> Tensor v'3 tcrops

      crops: 2-D with shape `[M, 2]`, all values must be >= 0. `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input dimension `i + 1`, which corresponds to spatial dimension i. It is required that @@ -295,45 +332,45 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [batch / prod(block_shape),

      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],

      input_shape[M+1], ..., input_shape[N-1]]

      Some examples:

      1. For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and - `crops = [[0, 0], [0, 0]]`:

      ```prettyprint + `crops = [[0, 0], [0, 0]]`:

      ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ``` x = [[[[1], [2]], [[3], [4]]]] ```

      1. For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and - `crops = [[0, 0], [0, 0]]`:

      ```prettyprint + `crops = [[0, 0], [0, 0]]`:

      ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] - ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ```

      1. For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and - `crops = [[0, 0], [0, 0]]`:

      ```prettyprint - x = [[[[1], [3]], [[5], [7]]], + `crops = [[0, 0], [0, 0]]`:

      ``` + x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] - ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ``` x = [[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] ```

      1. For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and - `crops = [[0, 0], [2, 0]]`:

      ```prettyprint + `crops = [[0, 0], [2, 0]]`:

      ``` x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] - ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

      -> Tensor Build t

      output

      BatchToSpace for N-D tensors of type T.

      This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape + ```

      -> Tensor Build t

      output

      BatchToSpace for N-D tensors of type T.

      This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape `block_shape + [batch]`, interleaves these blocks back into the grid defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as the input. The spatial dimensions of this intermediate result are then optionally cropped according to crops to produce the output. This is the - reverse of SpaceToBatch. See below for a precise description.

      batchToSpaceND'

      Arguments

      :: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tcrops) 
      => OpParams 
      -> Tensor v'1 t

      input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - where spatial_shape has M dimensions.

      -> Tensor v'2 tblock_shape

      block_shape: 1-D with shape `[M]`, all values must be >= 1.

      -> Tensor v'3 tcrops

      crops: 2-D with shape `[M, 2]`, all values must be >= 0. + reverse of SpaceToBatch. See below for a precise description.

      batchToSpaceND' Source #

      Arguments

      :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) 
      => OpParams 
      -> Tensor v'1 t

      input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + where spatial_shape has M dimensions.

      -> Tensor v'2 tblock_shape

      block_shape: 1-D with shape `[M]`, all values must be >= 1.

      -> Tensor v'3 tcrops

      crops: 2-D with shape `[M, 2]`, all values must be >= 0. `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input dimension `i + 1`, which corresponds to spatial dimension i. It is required that @@ -353,55 +390,74 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [batch / prod(block_shape),

      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],

      input_shape[M+1], ..., input_shape[N-1]]

      Some examples:

      1. For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and - `crops = [[0, 0], [0, 0]]`:

      ```prettyprint + `crops = [[0, 0], [0, 0]]`:

      ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ``` x = [[[[1], [2]], [[3], [4]]]] ```

      1. For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and - `crops = [[0, 0], [0, 0]]`:

      ```prettyprint + `crops = [[0, 0], [0, 0]]`:

      ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] - ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ```

      1. For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and - `crops = [[0, 0], [0, 0]]`:

      ```prettyprint - x = [[[[1], [3]], [[5], [7]]], + `crops = [[0, 0], [0, 0]]`:

      ``` + x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] - ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ``` x = [[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] ```

      1. For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and - `crops = [[0, 0], [2, 0]]`:

      ```prettyprint + `crops = [[0, 0], [2, 0]]`:

      ``` x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] - ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ```prettyprint + ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

      -> Tensor Build t

      output

      betainc

      Arguments

      :: OneOf `[Double, Float]` t 
      => Tensor v'1 t

      a

      -> Tensor v'2 t

      b

      -> Tensor v'3 t

      x

      -> Tensor Build t

      z

      Compute the regularized incomplete beta integral \(I_x(a, b)\).

      The regularized incomplete beta integral is defined as:

      ``` - I_x(a, b) = frac{B(x; a, b)}{B(a, b)} - ``` - where

      ``` - B(x; a, b) = int_0^x t^{a-1} (1 - t)^{b-1} dt - ```

      is the incomplete beta function and \(B(a, b)\) is the *complete* - beta function.

      betainc'

      Arguments

      :: OneOf `[Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      a

      -> Tensor v'2 t

      b

      -> Tensor v'3 t

      x

      -> Tensor Build t

      z

      biasAdd

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => Tensor v'1 t

      value: Any number of dimensions.

      -> Tensor v'2 t

      bias: 1-D with size the last dimension of value.

      -> Tensor Build t

      output: Broadcasted sum of value and bias.

      Adds bias to value.

      This is a special case of `tf.add` where bias is restricted to be 1-D. - Broadcasting is supported, so value may have any number of dimensions.

      biasAdd'

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      value: Any number of dimensions.

      -> Tensor v'2 t

      bias: 1-D with size the last dimension of value.

      -> Tensor Build t

      output: Broadcasted sum of value and bias.

      biasAddGrad

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => Tensor v'1 t

      out_backprop: Any number of dimensions.

      -> Tensor Build t

      output: 1-D with size the feature dimension of out_backprop.

      The backward operation for BiasAdd on the "bias" tensor.

      It accumulates all the values from out_backprop into the feature dimension. + ```

      -> Tensor Build t

      output

      betainc Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => Tensor v'1 t

      a

      -> Tensor v'2 t

      b

      -> Tensor v'3 t

      x

      -> Tensor Build t

      z

      Compute the regularized incomplete beta integral \(I_x(a, b)\).

      The regularized incomplete beta integral is defined as:

      \(I_x(a, b) = frac{B(x; a, b)}{B(a, b)}\)

      where

      \(B(x; a, b) = int_0^x t^{a-1} (1 - t)^{b-1} dt\)

      is the incomplete beta function and \(B(a, b)\) is the *complete* + beta function.

      betainc' Source #

      Arguments

      :: OneOf '[Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      a

      -> Tensor v'2 t

      b

      -> Tensor v'3 t

      x

      -> Tensor Build t

      z

      biasAdd Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => Tensor v'1 t

      value: Any number of dimensions.

      -> Tensor v'2 t

      bias: 1-D with size the last dimension of value.

      -> Tensor Build t

      output: Broadcasted sum of value and bias.

      Adds bias to value.

      This is a special case of `tf.add` where bias is restricted to be 1-D. + Broadcasting is supported, so value may have any number of dimensions.

      biasAdd' Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      value: Any number of dimensions.

      -> Tensor v'2 t

      bias: 1-D with size the last dimension of value.

      -> Tensor Build t

      output: Broadcasted sum of value and bias.

      biasAddGrad Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => Tensor v'1 t

      out_backprop: Any number of dimensions.

      -> Tensor Build t

      output: 1-D with size the feature dimension of out_backprop.

      The backward operation for BiasAdd on the "bias" tensor.

      It accumulates all the values from out_backprop into the feature dimension. For NHWC data format, the feature dimension is the last. For NCHW data format, - the feature dimension is the third-to-last.

      biasAddGrad'

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      out_backprop: Any number of dimensions.

      -> Tensor Build t

      output: 1-D with size the feature dimension of out_backprop.

      biasAddV1

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => Tensor v'1 t

      value: Any number of dimensions.

      -> Tensor v'2 t

      bias: 1-D with size the last dimension of value.

      -> Tensor Build t

      output: Broadcasted sum of value and bias.

      Adds bias to value.

      This is a deprecated version of BiasAdd and will be soon removed.

      This is a special case of `tf.add` where bias is restricted to be 1-D. - Broadcasting is supported, so value may have any number of dimensions.

      biasAddV1'

      Arguments

      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
      => OpParams 
      -> Tensor v'1 t

      value: Any number of dimensions.

      -> Tensor v'2 t

      bias: 1-D with size the last dimension of value.

      -> Tensor Build t

      output: Broadcasted sum of value and bias.

      bitcast

      Arguments

      :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` type') 
      => Tensor v'1 t

      input

      -> Tensor Build type'

      output

      Bitcasts a tensor from one type to another without copying data.

      Given a tensor input, this operation returns a tensor that has the same buffer + the feature dimension is the third-to-last.

      biasAddGrad' Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      out_backprop: Any number of dimensions.

      -> Tensor Build t

      output: 1-D with size the feature dimension of out_backprop.

      biasAddV1 Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => Tensor v'1 t

      value: Any number of dimensions.

      -> Tensor v'2 t

      bias: 1-D with size the last dimension of value.

      -> Tensor Build t

      output: Broadcasted sum of value and bias.

      Adds bias to value.

      This is a deprecated version of BiasAdd and will be soon removed.

      This is a special case of `tf.add` where bias is restricted to be 1-D. + Broadcasting is supported, so value may have any number of dimensions.

      biasAddV1' Source #

      Arguments

      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      value: Any number of dimensions.

      -> Tensor v'2 t

      bias: 1-D with size the last dimension of value.

      -> Tensor Build t

      output: Broadcasted sum of value and bias.

      bincount Source #

      Arguments

      :: OneOf '[Int32, Int64, Double, Float] t 
      => Tensor v'1 Int32

      arr: int32 Tensor.

      -> Tensor v'2 Int32

      size: non-negative int32 scalar Tensor.

      -> Tensor v'3 t

      weights: is an int32, int64, float32, or float64 Tensor with the same + shape as arr, or a length-0 Tensor, in which case it acts as all weights + equal to 1.

      -> Tensor Build t

      bins: 1D Tensor with length equal to size. The counts or summed weights for + each value in the range [0, size).

      Counts the number of occurrences of each value in an integer array.

      Outputs a vector with length size and the same dtype as weights. If + weights are empty, then index i stores the number of times the value i is + counted in arr. If weights are non-empty, then index i stores the sum of + the value in weights at each index where the corresponding value in arr is + i.

      Values in arr outside of the range [0, size) are ignored.

      bincount' Source #

      Arguments

      :: OneOf '[Int32, Int64, Double, Float] t 
      => OpParams 
      -> Tensor v'1 Int32

      arr: int32 Tensor.

      -> Tensor v'2 Int32

      size: non-negative int32 scalar Tensor.

      -> Tensor v'3 t

      weights: is an int32, int64, float32, or float64 Tensor with the same + shape as arr, or a length-0 Tensor, in which case it acts as all weights + equal to 1.

      -> Tensor Build t

      bins: 1D Tensor with length equal to size. The counts or summed weights for + each value in the range [0, size).

      bitcast Source #

      Bitcasts a tensor from one type to another without copying data.

      Given a tensor input, this operation returns a tensor that has the same buffer data as input with datatype `type`.

      If the input datatype T is larger than the output datatype `type` then the shape changes from [...] to [..., sizeof(T)/sizeof(`type`)].

      If T is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(T). The shape then goes from [..., sizeof(`type`)/sizeof(T)] to [...].

      • NOTE*: Bitcast is implemented as a low-level cast, so machines with different - endian orderings will give different results.

      broadcastArgs

      Arguments

      :: OneOf `[Int32, Int64]` t 
      => Tensor v'1 t

      s0

      -> Tensor v'2 t

      s1

      -> Tensor Build t

      r0

      Return the shape of s0 op s1 with broadcast.

      Given s0 and s1, tensors that represent shapes, compute r0, the - broadcasted shape. s0, s1 and r0 are all integer vectors.

      broadcastArgs'

      Arguments

      :: OneOf `[Int32, Int64]` t 
      => OpParams 
      -> Tensor v'1 t

      s0

      -> Tensor v'2 t

      s1

      -> Tensor Build t

      r0

      broadcastGradientArgs

      Arguments

      :: OneOf `[Int32, Int64]` t 
      => Tensor v'1 t

      s0

      -> Tensor v'2 t

      s1

      -> (Tensor Build t, Tensor Build t)

      (r0, r1)

      • r0
      • r1

      Return the reduction indices for computing gradients of s0 op s1 with broadcast.

      This is typically used by gradient computations for a broadcasting operation.

      broadcastGradientArgs'

      Arguments

      :: OneOf `[Int32, Int64]` t 
      => OpParams 
      -> Tensor v'1 t

      s0

      -> Tensor v'2 t

      s1

      -> (Tensor Build t, Tensor Build t)

      (r0, r1)

      • r0
      • r1

      cTCBeamSearchDecoder

      Arguments

      :: Int64

      beam_width: A scalar >= 0 (beam search beam width).

      -> Int64

      top_paths: A scalar >= 0, <= beam_width (controls output size).

      -> Tensor v'1 Float

      inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

      -> Tensor v'2 Int32

      sequence_length: A vector containing sequence lengths, size `(batch)`.

      -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)

      (decoded_indices, decoded_values, decoded_shape, log_probability)

      • decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, + endian orderings will give different results.

      bitwiseAnd Source #

      Arguments

      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8] t 
      => Tensor v'1 t

      x

      -> Tensor v'2 t

      y

      -> Tensor Build t

      z

      Elementwise computes the bitwise AND of x and y.

      The result will have those bits set, that are set in both x and y. The + computation is performed on the underlying representations of x and y.

      bitwiseAnd' Source #

      Arguments

      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8] t 
      => OpParams 
      -> Tensor v'1 t

      x

      -> Tensor v'2 t

      y

      -> Tensor Build t

      z

      bitwiseOr Source #

      Arguments

      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8] t 
      => Tensor v'1 t

      x

      -> Tensor v'2 t

      y

      -> Tensor Build t

      z

      Elementwise computes the bitwise OR of x and y.

      The result will have those bits set, that are set in x, y or both. The + computation is performed on the underlying representations of x and y.

      bitwiseOr' Source #

      Arguments

      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8] t 
      => OpParams 
      -> Tensor v'1 t

      x

      -> Tensor v'2 t

      y

      -> Tensor Build t

      z

      bitwiseXor Source #

      Arguments

      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8] t 
      => Tensor v'1 t

      x

      -> Tensor v'2 t

      y

      -> Tensor Build t

      z

      Elementwise computes the bitwise XOR of x and y.

      The result will have those bits set, that are different in x and y. The + computation is performed on the underlying representations of x and y.

      bitwiseXor' Source #

      Arguments

      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8] t 
      => OpParams 
      -> Tensor v'1 t

      x

      -> Tensor v'2 t

      y

      -> Tensor Build t

      z

      broadcastArgs Source #

      Arguments

      :: OneOf '[Int32, Int64] t 
      => Tensor v'1 t

      s0

      -> Tensor v'2 t

      s1

      -> Tensor Build t

      r0

      Return the shape of s0 op s1 with broadcast.

      Given s0 and s1, tensors that represent shapes, compute r0, the + broadcasted shape. s0, s1 and r0 are all integer vectors.

      broadcastArgs' Source #

      Arguments

      :: OneOf '[Int32, Int64] t 
      => OpParams 
      -> Tensor v'1 t

      s0

      -> Tensor v'2 t

      s1

      -> Tensor Build t

      r0

      broadcastGradientArgs Source #

      Arguments

      :: OneOf '[Int32, Int64] t 
      => Tensor v'1 t

      s0

      -> Tensor v'2 t

      s1

      -> (Tensor Build t, Tensor Build t)

      (r0, r1)

      • r0
      • r1

      Return the reduction indices for computing gradients of s0 op s1 with broadcast.

      This is typically used by gradient computations for a broadcasting operation.

      broadcastGradientArgs' Source #

      Arguments

      :: OneOf '[Int32, Int64] t 
      => OpParams 
      -> Tensor v'1 t

      s0

      -> Tensor v'2 t

      s1

      -> (Tensor Build t, Tensor Build t)

      (r0, r1)

      • r0
      • r1

      bucketize Source #

      Arguments

      :: OneOf '[Int32, Int64, Double, Float] t 
      => Tensor v'1 t

      input: Any shape of Tensor contains with int or float type.

      -> Tensor Build Int32

      output: Same shape with input, each value of input replaced with bucket index.

      compatibility(numpy) + Equivalent to np.digitize. + end_compatibility

      Bucketizes input based on boundaries.

      For example, if the inputs are + boundaries = [0, 10, 100] + input = [[-5, 10000] + [150, 10] + [5, 100]]

      then the output will be + output = [[0, 3] + [3, 2] + [1, 3]]

      bucketize' Source #

      Arguments

      :: OneOf '[Int32, Int64, Double, Float] t 
      => OpParams 
      -> Tensor v'1 t

      input: Any shape of Tensor contains with int or float type.

      -> Tensor Build Int32

      output: Same shape with input, each value of input replaced with bucket index.

      compatibility(numpy) + Equivalent to np.digitize. + end_compatibility

      cTCBeamSearchDecoder Source #

      Arguments

      :: Int64

      beam_width: A scalar >= 0 (beam search beam width).

      -> Int64

      top_paths: A scalar >= 0, <= beam_width (controls output size).

      -> Tensor v'1 Float

      inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

      -> Tensor v'2 Int32

      sequence_length: A vector containing sequence lengths, size `(batch)`.

      -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)

      (decoded_indices, decoded_values, decoded_shape, log_probability)

      • decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, size `(total_decoded_outputs[j] x 2)`, has indices of a `SparseTensor2`. The rows store: [batch, time].
      • decoded_values: A list (length: top_paths) of values vectors. Vector j, size `(length total_decoded_outputs[j])`, has the values of a @@ -412,98 +468,110 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core this means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the top path is "A B B B B", "A B" is returned if merge_repeated = True but "A B B B B" is - returned if merge_repeated = False.

        cTCBeamSearchDecoder'

        Arguments

        :: OpParams 
        -> Int64

        beam_width: A scalar >= 0 (beam search beam width).

        -> Int64

        top_paths: A scalar >= 0, <= beam_width (controls output size).

        -> Tensor v'1 Float

        inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

        -> Tensor v'2 Int32

        sequence_length: A vector containing sequence lengths, size `(batch)`.

        -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)

        (decoded_indices, decoded_values, decoded_shape, log_probability)

        • decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, + returned if merge_repeated = False.

          cTCBeamSearchDecoder' Source #

          Arguments

          :: OpParams 
          -> Int64

          beam_width: A scalar >= 0 (beam search beam width).

          -> Int64

          top_paths: A scalar >= 0, <= beam_width (controls output size).

          -> Tensor v'1 Float

          inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

          -> Tensor v'2 Int32

          sequence_length: A vector containing sequence lengths, size `(batch)`.

          -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)

          (decoded_indices, decoded_values, decoded_shape, log_probability)

          • decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, size `(total_decoded_outputs[j] x 2)`, has indices of a `SparseTensor2`. The rows store: [batch, time].
          • decoded_values: A list (length: top_paths) of values vectors. Vector j, size `(length total_decoded_outputs[j])`, has the values of a `SparseTensor2`. The vector stores the decoded classes for beam j.
          • decoded_shape: A list (length: top_paths) of shape vector. Vector j, size `(2)`, stores the shape of the decoded `SparseTensor[j]`. Its values are: `[batch_size, max_decoded_length[j]]`.
          • log_probability: A matrix, shaped: `(batch_size x top_paths)`. The - sequence log-probabilities.

          cTCGreedyDecoder

          Arguments

          :: Tensor v'1 Float

          inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

          -> Tensor v'2 Int32

          sequence_length: A vector containing sequence lengths, size `(batch_size)`.

          -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)

          (decoded_indices, decoded_values, decoded_shape, log_probability)

          • decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, + sequence log-probabilities.

          cTCGreedyDecoder Source #

          Arguments

          :: Tensor v'1 Float

          inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

          -> Tensor v'2 Int32

          sequence_length: A vector containing sequence lengths, size `(batch_size)`.

          -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)

          (decoded_indices, decoded_values, decoded_shape, log_probability)

          • decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, of a `SparseTensor2`. The rows store: [batch, time].
          • decoded_values: Values vector, size: `(total_decoded_outputs)`, of a `SparseTensor2`. The vector stores the decoded classes.
          • decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor. Values are: `[batch_size, max_decoded_length]`.
          • log_probability: Matrix, size `(batch_size x 1)`, containing sequence log-probabilities.

          Performs greedy decoding on the logits given in inputs.

          A note about the attribute merge_repeated: if enabled, when consecutive logits' maximum indices are the same, only the first of - these is emitted. Labeling the blank *, the sequence "A B B * B B" - becomes "A B" if merge_repeated = True and "A B B B B" if + these is emitted. Labeling the blank *, the sequence "A B B * B B" + becomes "A B B" if merge_repeated = True and "A B B B B" if merge_repeated = False.

          Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank, index `(num_classes - 1)`, no new - element is emitted.

          cTCGreedyDecoder'

          Arguments

          :: OpParams 
          -> Tensor v'1 Float

          inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

          -> Tensor v'2 Int32

          sequence_length: A vector containing sequence lengths, size `(batch_size)`.

          -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)

          (decoded_indices, decoded_values, decoded_shape, log_probability)

          • decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, + element is emitted.

            cTCGreedyDecoder' Source #

            Arguments

            :: OpParams 
            -> Tensor v'1 Float

            inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

            -> Tensor v'2 Int32

            sequence_length: A vector containing sequence lengths, size `(batch_size)`.

            -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)

            (decoded_indices, decoded_values, decoded_shape, log_probability)

            • decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, of a `SparseTensor2`. The rows store: [batch, time].
            • decoded_values: Values vector, size: `(total_decoded_outputs)`, of a `SparseTensor2`. The vector stores the decoded classes.
            • decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor. Values are: `[batch_size, max_decoded_length]`.
            • log_probability: Matrix, size `(batch_size x 1)`, containing sequence - log-probabilities.

            cTCLoss

            Arguments

            :: Tensor v'1 Float

            inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

            -> Tensor v'2 Int64

            labels_indices: The indices of a `SparseTensor2`. + log-probabilities.

            cTCLoss Source #

            Arguments

            :: Tensor v'1 Float

            inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

            -> Tensor v'2 Int64

            labels_indices: The indices of a `SparseTensor2`. `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for - `(batch b, time t)`.

            -> Tensor v'3 Int32

            labels_values: The values (labels) associated with the given batch and time.

            -> Tensor v'4 Int32

            sequence_length: A vector containing sequence lengths (batch).

            -> (Tensor Build Float, Tensor Build Float)

            (loss, gradient)

            • loss: A vector (batch) containing log-probabilities.
            • gradient: The gradient of loss. 3-D, shape: + `(batch b, time t)`.

            -> Tensor v'3 Int32

            labels_values: The values (labels) associated with the given batch and time.

            -> Tensor v'4 Int32

            sequence_length: A vector containing sequence lengths (batch).

            -> (Tensor Build Float, Tensor Build Float)

            (loss, gradient)

            • loss: A vector (batch) containing log-probabilities.
            • gradient: The gradient of loss. 3-D, shape: `(max_time x batch_size x num_classes)`.

            Calculates the CTC Loss (log probability) for each batch entry. Also calculates

            the gradient. This class performs the softmax operation for you, so inputs - should be e.g. linear projections of outputs by an LSTM.

            cTCLoss'

            Arguments

            :: OpParams 
            -> Tensor v'1 Float

            inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

            -> Tensor v'2 Int64

            labels_indices: The indices of a `SparseTensor2`. + should be e.g. linear projections of outputs by an LSTM.

            cTCLoss' Source #

            Arguments

            :: OpParams 
            -> Tensor v'1 Float

            inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

            -> Tensor v'2 Int64

            labels_indices: The indices of a `SparseTensor2`. `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for - `(batch b, time t)`.

            -> Tensor v'3 Int32

            labels_values: The values (labels) associated with the given batch and time.

            -> Tensor v'4 Int32

            sequence_length: A vector containing sequence lengths (batch).

            -> (Tensor Build Float, Tensor Build Float)

            (loss, gradient)

            • loss: A vector (batch) containing log-probabilities.
            • gradient: The gradient of loss. 3-D, shape: - `(max_time x batch_size x num_classes)`.

            cast

            Arguments

            :: (TensorType srcT, TensorType dstT) 
            => Tensor v'1 srcT

            x

            -> Tensor Build dstT

            y

            Cast x of type SrcT to y of DstT.

            cast'

            Arguments

            :: (TensorType srcT, TensorType dstT) 
            => OpParams 
            -> Tensor v'1 srcT

            x

            -> Tensor Build dstT

            y

            ceil

            Arguments

            :: OneOf `[Word16, Double, Float]` t 
            => Tensor v'1 t

            x

            -> Tensor Build t

            y

            Returns element-wise smallest integer in not less than x.

            ceil'

            Arguments

            :: OneOf `[Word16, Double, Float]` t 
            => OpParams 
            -> Tensor v'1 t

            x

            -> Tensor Build t

            y

            checkNumerics

            Arguments

            :: OneOf `[Word16, Double, Float]` t 
            => Tensor v'1 t

            tensor

            -> Tensor Build t

            output

            Checks a tensor for NaN and Inf values.

            When run, reports an InvalidArgument error if tensor has any values - that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is.

            checkNumerics'

            Arguments

            :: OneOf `[Word16, Double, Float]` t 
            => OpParams 
            -> Tensor v'1 t

            tensor

            -> Tensor Build t

            output

            cholesky

            Arguments

            :: OneOf `[Double, Float]` t 
            => Tensor v'1 t

            input: Shape is `[..., M, M]`.

            -> Tensor Build t

            output: Shape is `[..., M, M]`.

            Computes the Cholesky decomposition of one or more square matrices.

            The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions - form square matrices, with the same constraints as the single matrix Cholesky - decomposition above. The output is a tensor of the same shape as the input - containing the Cholesky decompositions for all input submatrices `[..., :, :]`.

            cholesky'

            Arguments

            :: OneOf `[Double, Float]` t 
            => OpParams 
            -> Tensor v'1 t

            input: Shape is `[..., M, M]`.

            -> Tensor Build t

            output: Shape is `[..., M, M]`.

            choleskyGrad

            Arguments

            :: OneOf `[Double, Float]` t 
            => Tensor v'1 t

            l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. + `(batch b, time t)`.

            -> Tensor v'3 Int32

            labels_values: The values (labels) associated with the given batch and time.

            -> Tensor v'4 Int32

            sequence_length: A vector containing sequence lengths (batch).

            -> (Tensor Build Float, Tensor Build Float)

            (loss, gradient)

            • loss: A vector (batch) containing log-probabilities.
            • gradient: The gradient of loss. 3-D, shape: + `(max_time x batch_size x num_classes)`.

            cacheDataset Source #

            Arguments

            :: MonadBuild m' 
            => [DataType]

            output_types

            -> Tensor v'1 ResourceHandle

            input_dataset

            -> Tensor v'2 ByteString

            filename: A path on the filesystem where we should cache the dataset. Note: this + will be a directory.

            -> m' (Tensor Value ResourceHandle)

            handle

            Creates a dataset that caches elements from input_dataset.

            A CacheDataset will iterate over the input_dataset, and store tensors. If the + cache already exists, the cache will be used. If the cache is inappropriate + (e.g. cannot be opened, contains tensors of the wrong shape / size), an error + will the returned when used.

            cacheDataset' Source #

            Arguments

            :: MonadBuild m' 
            => OpParams 
            -> [DataType]

            output_types

            -> Tensor v'1 ResourceHandle

            input_dataset

            -> Tensor v'2 ByteString

            filename: A path on the filesystem where we should cache the dataset. Note: this + will be a directory.

            -> m' (Tensor Value ResourceHandle)

            handle

            cast Source #

            Arguments

            :: (TensorType srcT, TensorType dstT) 
            => Tensor v'1 srcT

            x

            -> Tensor Build dstT

            y

            Cast x of type SrcT to y of DstT.

            cast' Source #

            Arguments

            :: (TensorType srcT, TensorType dstT) 
            => OpParams 
            -> Tensor v'1 srcT

            x

            -> Tensor Build dstT

            y

            ceil Source #

            Arguments

            :: OneOf '[Word16, Double, Float] t 
            => Tensor v'1 t

            x

            -> Tensor Build t

            y

            Returns element-wise smallest integer in not less than x.

            ceil' Source #

            Arguments

            :: OneOf '[Word16, Double, Float] t 
            => OpParams 
            -> Tensor v'1 t

            x

            -> Tensor Build t

            y

            checkNumerics Source #

            Arguments

            :: OneOf '[Word16, Double, Float] t 
            => Tensor v'1 t

            tensor

            -> Tensor Build t

            output

            Checks a tensor for NaN and Inf values.

            When run, reports an InvalidArgument error if tensor has any values + that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is.

            checkNumerics' Source #

            Arguments

            :: OneOf '[Word16, Double, Float] t 
            => OpParams 
            -> Tensor v'1 t

            tensor

            -> Tensor Build t

            output

            cholesky Source #

            Arguments

            :: OneOf '[Complex Double, Complex Float, Double, Float] t 
            => Tensor v'1 t

            input: Shape is `[..., M, M]`.

            -> Tensor Build t

            output: Shape is `[..., M, M]`.

            Computes the Cholesky decomposition of one or more square matrices.

            The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + form square matrices.

            The input has to be symmetric and positive definite. Only the lower-triangular + part of the input will be used for this operation. The upper-triangular part + will not be read.

            The output is a tensor of the same shape as the input + containing the Cholesky decompositions for all input submatrices `[..., :, :]`.

            • *Note**: The gradient computation on GPU is faster for large matrices but + not for large batch dimensions when the submatrices are small. In this + case it might be faster to use the CPU.

            cholesky' Source #

            Arguments

            :: OneOf '[Complex Double, Complex Float, Double, Float] t 
            => OpParams 
            -> Tensor v'1 t

            input: Shape is `[..., M, M]`.

            -> Tensor Build t

            output: Shape is `[..., M, M]`.

            choleskyGrad Source #

            Arguments

            :: OneOf '[Double, Float] t 
            => Tensor v'1 t

            l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of - this tensor.

            -> Tensor v'2 t

            grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. + this tensor.

            -> Tensor v'2 t

            grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of - this tensor.

            -> Tensor Build t

            output: Symmetrized version of df/dA . Shape is `[..., M, M]`

            Computes the reverse mode backpropagated gradient of the Cholesky algorithm.

            For an explanation see "Differentiation of the Cholesky algorithm" by - Iain Murray http://arxiv.org/abs/1602.07527.

            choleskyGrad'

            Arguments

            :: OneOf `[Double, Float]` t 
            => OpParams 
            -> Tensor v'1 t

            l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. + this tensor.

            -> Tensor Build t

            output: Symmetrized version of df/dA . Shape is `[..., M, M]`

            Computes the reverse mode backpropagated gradient of the Cholesky algorithm.

            For an explanation see "Differentiation of the Cholesky algorithm" by + Iain Murray http://arxiv.org/abs/1602.07527.

            choleskyGrad' Source #

            Arguments

            :: OneOf '[Double, Float] t 
            => OpParams 
            -> Tensor v'1 t

            l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of - this tensor.

            -> Tensor v'2 t

            grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. + this tensor.

            -> Tensor v'2 t

            grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of - this tensor.

            -> Tensor Build t

            output: Symmetrized version of df/dA . Shape is `[..., M, M]`

            complex

            Arguments

            :: (OneOf `[Double, Float]` t, OneOf `[Complex Double, Complex Float]` tout) 
            => Tensor v'1 t

            real

            -> Tensor v'2 t

            imag

            -> Tensor Build tout

            out

            Converts two real numbers to a complex number.

            Given a tensor real representing the real part of a complex number, and a + this tensor.

            -> Tensor Build t

            output: Symmetrized version of df/dA . Shape is `[..., M, M]`

            complex Source #

            Arguments

            :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) 
            => Tensor v'1 t

            real

            -> Tensor v'2 t

            imag

            -> Tensor Build tout

            out

            Converts two real numbers to a complex number.

            Given a tensor real representing the real part of a complex number, and a tensor imag representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \(a + bj\), where *a* represents the real part and *b* represents the imag part.

            The input tensors real and imag must have the same shape.

            For example:

            ``` # tensor real is [2.25, 3.25] # tensor imag is [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] - ```

            complex'

            Arguments

            :: (OneOf `[Double, Float]` t, OneOf `[Complex Double, Complex Float]` tout) 
            => OpParams 
            -> Tensor v'1 t

            real

            -> Tensor v'2 t

            imag

            -> Tensor Build tout

            out

            complexAbs

            Arguments

            :: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
            => Tensor v'1 t

            x

            -> Tensor Build tout

            y

            Computes the complex absolute value of a tensor.

            Given a tensor x of complex numbers, this operation returns a tensor of type + ```

            complex' Source #

            Arguments

            :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) 
            => OpParams 
            -> Tensor v'1 t

            real

            -> Tensor v'2 t

            imag

            -> Tensor Build tout

            out

            complexAbs Source #

            Arguments

            :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) 
            => Tensor v'1 t

            x

            -> Tensor Build tout

            y

            Computes the complex absolute value of a tensor.

            Given a tensor x of complex numbers, this operation returns a tensor of type float or double that is the absolute value of each element in x. All elements in x must be complex numbers of the form \(a + bj\). The absolute - value is computed as \( sqrt{a^2 + b^2}\).

            complexAbs'

            Arguments

            :: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
            => OpParams 
            -> Tensor v'1 t

            x

            -> Tensor Build tout

            y

            computeAccidentalHits

            Arguments

            :: Int64

            num_true: Number of true labels per context.

            -> Tensor v'1 Int64

            true_classes: The true_classes output of UnpackSparseLabels.

            -> Tensor v'2 Int64

            sampled_candidates: The sampled_candidates output of CandidateSampler.

            -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)

            (indices, ids, weights)

            • indices: A vector of indices corresponding to rows of true_candidates.
            • ids: A vector of IDs of positions in sampled_candidates that match a true_label + value is computed as \( sqrt{a^2 + b^2}\).

              complexAbs' Source #

              Arguments

              :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build tout

              y

              computeAccidentalHits Source #

              Arguments

              :: Int64

              num_true: Number of true labels per context.

              -> Tensor v'1 Int64

              true_classes: The true_classes output of UnpackSparseLabels.

              -> Tensor v'2 Int64

              sampled_candidates: The sampled_candidates output of CandidateSampler.

              -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)

              (indices, ids, weights)

              • indices: A vector of indices corresponding to rows of true_candidates.
              • ids: A vector of IDs of positions in sampled_candidates that match a true_label for the row with the corresponding index in indices.
              • weights: A vector of the same length as indices and ids, in which each element is -FLOAT_MAX.

              Computes the ids of the positions in sampled_candidates that match true_labels.

              When doing log-odds NCE, the result of this op should be passed through a SparseToDense op, then added to the logits of the sampled candidates. This has the effect of removing the sampled labels that match the true labels by - making the classifier sure that they are sampled labels.

              computeAccidentalHits'

              Arguments

              :: OpParams 
              -> Int64

              num_true: Number of true labels per context.

              -> Tensor v'1 Int64

              true_classes: The true_classes output of UnpackSparseLabels.

              -> Tensor v'2 Int64

              sampled_candidates: The sampled_candidates output of CandidateSampler.

              -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)

              (indices, ids, weights)

              • indices: A vector of indices corresponding to rows of true_candidates.
              • ids: A vector of IDs of positions in sampled_candidates that match a true_label + making the classifier sure that they are sampled labels.

                computeAccidentalHits' Source #

                Arguments

                :: OpParams 
                -> Int64

                num_true: Number of true labels per context.

                -> Tensor v'1 Int64

                true_classes: The true_classes output of UnpackSparseLabels.

                -> Tensor v'2 Int64

                sampled_candidates: The sampled_candidates output of CandidateSampler.

                -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)

                (indices, ids, weights)

                • indices: A vector of indices corresponding to rows of true_candidates.
                • ids: A vector of IDs of positions in sampled_candidates that match a true_label for the row with the corresponding index in indices.
                • weights: A vector of the same length as indices and ids, in which each element - is -FLOAT_MAX.

                concat

                Arguments

                :: TensorType t 
                => Tensor v'1 Int32

                concat_dim: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

                -> [Tensor v'2 t]

                values: The N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

                -> Tensor Build t

                output: A Tensor with the concatenation of values stacked along the + is -FLOAT_MAX.

                concat Source #

                Arguments

                :: TensorType t 
                => Tensor v'1 Int32

                concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

                -> [Tensor v'2 t]

                values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

                -> Tensor Build t

                output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

                Concatenates tensors along one dimension.

                concat'

                Arguments

                :: TensorType t 
                => OpParams 
                -> Tensor v'1 Int32

                concat_dim: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

                -> [Tensor v'2 t]

                values: The N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

                -> Tensor Build t

                output: A Tensor with the concatenation of values stacked along the + in concat_dim where it has the sum of the sizes.

                Concatenates tensors along one dimension.

                concat' Source #

                Arguments

                :: TensorType t 
                => OpParams 
                -> Tensor v'1 Int32

                concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

                -> [Tensor v'2 t]

                values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

                -> Tensor Build t

                output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

                concatOffset

                Arguments

                :: Tensor v'1 Int32

                concat_dim: The dimension along which to concatenate.

                -> [Tensor v'2 Int32]

                shape: The N int32 vectors representing shape of tensors being concatenated.

                -> [Tensor Build Int32]

                offset: The N int32 vectors representing the starting offset - of input tensors within the concatenated output.

                This is typically used by gradient computations for a concat operation.

                Computes offsets of concat inputs within its output.

                For example:

                ```prettyprint + in concat_dim where it has the sum of the sizes.

              concatOffset Source #

              Arguments

              :: Tensor v'1 Int32

              concat_dim: The dimension along which to concatenate.

              -> [Tensor v'2 Int32]

              shape: The N int32 vectors representing shape of tensors being concatenated.

              -> [Tensor Build Int32]

              offset: The N int32 vectors representing the starting offset + of input tensors within the concatenated output.

              Computes offsets of concat inputs within its output.

              For example:

              ``` # x is [2, 2, 7] # y is [2, 3, 7] # z is [2, 5, 7] concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0] - ```

              concatOffset'

              Arguments

              :: OpParams 
              -> Tensor v'1 Int32

              concat_dim: The dimension along which to concatenate.

              -> [Tensor v'2 Int32]

              shape: The N int32 vectors representing shape of tensors being concatenated.

              -> [Tensor Build Int32]

              offset: The N int32 vectors representing the starting offset - of input tensors within the concatenated output.

              This is typically used by gradient computations for a concat operation.

              concatV2

              Arguments

              :: (TensorType t, OneOf `[Int32, Int64]` tidx) 
              => [Tensor v'1 t]

              values: List of N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

              -> Tensor v'2 tidx

              axis: 0-D. The dimension along which to concatenate. Must be in the - range [-rank(values), rank(values)).

              -> Tensor Build t

              output: A Tensor with the concatenation of values stacked along the + ```

              This is typically used by gradient computations for a concat operation.

              concatOffset' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 Int32

              concat_dim: The dimension along which to concatenate.

              -> [Tensor v'2 Int32]

              shape: The N int32 vectors representing shape of tensors being concatenated.

              -> [Tensor Build Int32]

              offset: The N int32 vectors representing the starting offset + of input tensors within the concatenated output.

              concatV2 Source #

              Arguments

              :: (TensorType t, OneOf '[Int32, Int64] tidx) 
              => [Tensor v'1 t]

              values: List of N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

              -> Tensor v'2 tidx

              axis: 0-D. The dimension along which to concatenate. Must be in the + range [-rank(values), rank(values)).

              -> Tensor Build t

              output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

              Concatenates tensors along one dimension.

              concatV2'

              Arguments

              :: (TensorType t, OneOf `[Int32, Int64]` tidx) 
              => OpParams 
              -> [Tensor v'1 t]

              values: List of N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

              -> Tensor v'2 tidx

              axis: 0-D. The dimension along which to concatenate. Must be in the - range [-rank(values), rank(values)).

              -> Tensor Build t

              output: A Tensor with the concatenation of values stacked along the + in concat_dim where it has the sum of the sizes.

              Concatenates tensors along one dimension.

              concatV2' Source #

              Arguments

              :: (TensorType t, OneOf '[Int32, Int64] tidx) 
              => OpParams 
              -> [Tensor v'1 t]

              values: List of N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

              -> Tensor v'2 tidx

              axis: 0-D. The dimension along which to concatenate. Must be in the + range [-rank(values), rank(values)).

              -> Tensor Build t

              output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

              conditionalAccumulator

              Arguments

              :: MonadBuild m' 
              => DataType

              dtype: The type of the value being accumulated.

              -> Shape

              shape: The shape of the values, can be [], in which case shape is unknown.

              -> m' (Tensor Ref ByteString)

              handle: The handle to the accumulator.

              A conditional accumulator for aggregating gradients. The accumulator accepts

              gradients marked with local_step greater or equal to the most recent global_step - known to the accumulator. The average can be extracted from the accumulator, - provided sufficient gradients have been accumulated. Extracting the average - automatically resets the aggregate to 0, and increments the global_step recorded - by the accumulator.

              conditionalAccumulator'

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> DataType

              dtype: The type of the value being accumulated.

              -> Shape

              shape: The shape of the values, can be [], in which case shape is unknown.

              -> m' (Tensor Ref ByteString)

              handle: The handle to the accumulator.

              conj

              Arguments

              :: OneOf `[Complex Double, Complex Float]` t 
              => Tensor v'1 t

              input

              -> Tensor Build t

              output

              Returns the complex conjugate of a complex number.

              Given a tensor input of complex numbers, this operation returns a tensor of + in concat_dim where it has the sum of the sizes.

              concatenateDataset Source #

              Arguments

              :: MonadBuild m' 
              => [DataType]

              output_types

              -> Tensor v'1 ResourceHandle

              input_dataset

              -> Tensor v'2 ResourceHandle

              another_dataset

              -> m' (Tensor Value ResourceHandle)

              handle

              Creates a dataset that concatenates input_dataset with another_dataset.

              concatenateDataset' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> [DataType]

              output_types

              -> Tensor v'1 ResourceHandle

              input_dataset

              -> Tensor v'2 ResourceHandle

              another_dataset

              -> m' (Tensor Value ResourceHandle)

              handle

              conditionalAccumulator Source #

              Arguments

              :: MonadBuild m' 
              => DataType

              dtype: The type of the value being accumulated.

              -> Shape

              shape: The shape of the values, can be [], in which case shape is unknown.

              -> m' (Tensor Ref ByteString)

              handle: The handle to the accumulator.

              A conditional accumulator for aggregating gradients.

              The accumulator accepts gradients marked with local_step greater or + equal to the most recent global_step known to the accumulator. The + average can be extracted from the accumulator, provided sufficient + gradients have been accumulated. Extracting the average automatically + resets the aggregate to 0, and increments the global_step recorded by + the accumulator.

              conditionalAccumulator' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> DataType

              dtype: The type of the value being accumulated.

              -> Shape

              shape: The shape of the values, can be [], in which case shape is unknown.

              -> m' (Tensor Ref ByteString)

              handle: The handle to the accumulator.

              conj Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float] t 
              => Tensor v'1 t

              input

              -> Tensor Build t

              output

              Returns the complex conjugate of a complex number.

              Given a tensor input of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in input. The complex numbers in input must be of the form \(a + bj\), where *a* is the real part and *b* is the imaginary part.

              The complex conjugate returned by this operation is of the form \(a - bj\).

              For example:

              ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] - ```

              conj'

              Arguments

              :: OneOf `[Complex Double, Complex Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor Build t

              output

              const

              Arguments

              :: TensorType dtype 
              => Tensor Build dtype

              output

              Returns a constant tensor.

              const'

              Arguments

              :: TensorType dtype 
              => OpParams 
              -> Tensor Build dtype

              output

              controlTrigger :: forall m'. MonadBuild m' => m' ControlNode

              Does nothing. Serves as a control trigger for scheduling.

              Only useful as a placeholder for control edges.

              conv2D

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => Tensor v'1 t

              input

              -> Tensor v'2 t

              filter

              -> Tensor Build t

              output

              Computes a 2-D convolution given 4-D input and filter tensors.

              Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + ```

              conj' Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float] t 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor Build t

              output

              const Source #

              Arguments

              :: TensorType dtype 
              => Tensor Build dtype

              output

              Returns a constant tensor.

              const' Source #

              Arguments

              :: TensorType dtype 
              => OpParams 
              -> Tensor Build dtype

              output

              controlTrigger :: forall m'. MonadBuild m' => m' ControlNode Source #

              Does nothing. Serves as a control trigger for scheduling.

              Only useful as a placeholder for control edges.

              conv2D Source #

              Arguments

              :: OneOf '[Word16, Float] t 
              => Tensor v'1 t

              input: A 4-D tensor. The dimension order is interpreted according to the value + of data_format, see below for details.

              -> Tensor v'2 t

              filter: A 4-D tensor of shape + `[filter_height, filter_width, in_channels, out_channels]`

              -> Tensor Build t

              output: A 4-D tensor. The dimension order is determined by the value of + data_format, see below for details.

              Computes a 2-D convolution given 4-D input and filter tensors.

              Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following:

              1. Flattens the filter to a 2-D matrix with shape @@ -513,93 +581,94 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core vector.

              In detail, with the default NHWC format,

              output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k]

              Must have `strides[0] = strides[3] = 1`. For the most common case of the same - horizontal and vertices strides, `strides = [1, stride, stride, 1]`.

              conv2D'

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor v'2 t

              filter

              -> Tensor Build t

              output

              conv2DBackpropFilter

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, - where filter is a 4-D - `[filter_height, filter_width, in_channels, out_channels]` tensor.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape + horizontal and vertices strides, `strides = [1, stride, stride, 1]`.

              conv2D' Source #

              Arguments

              :: OneOf '[Word16, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: A 4-D tensor. The dimension order is interpreted according to the value + of data_format, see below for details.

              -> Tensor v'2 t

              filter: A 4-D tensor of shape + `[filter_height, filter_width, in_channels, out_channels]`

              -> Tensor Build t

              output: A 4-D tensor. The dimension order is determined by the value of + data_format, see below for details.

              conv2DBackpropFilter Source #

              Arguments

              :: OneOf '[Word16, Float] t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, + where filter is a 4-D + `[filter_height, filter_width, in_channels, out_channels]` tensor.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. - the filter input of the convolution.

              Computes the gradients of convolution with respect to the filter.

              conv2DBackpropFilter'

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, - where filter is a 4-D - `[filter_height, filter_width, in_channels, out_channels]` tensor.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape + the filter input of the convolution.

              Computes the gradients of convolution with respect to the filter.

              conv2DBackpropFilter' Source #

              Arguments

              :: OneOf '[Word16, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, + where filter is a 4-D + `[filter_height, filter_width, in_channels, out_channels]` tensor.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. - the filter input of the convolution.

              conv2DBackpropInput

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => Tensor v'1 Int32

              input_sizes: An integer vector representing the shape of input, - where input is a 4-D `[batch, height, width, channels]` tensor.

              -> Tensor v'2 t

              filter: 4-D with shape - `[filter_height, filter_width, in_channels, out_channels]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient - w.r.t. the input of the convolution.

              Computes the gradients of convolution with respect to the input.

              conv2DBackpropInput'

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 Int32

              input_sizes: An integer vector representing the shape of input, - where input is a 4-D `[batch, height, width, channels]` tensor.

              -> Tensor v'2 t

              filter: 4-D with shape - `[filter_height, filter_width, in_channels, out_channels]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient - w.r.t. the input of the convolution.

              conv3D

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[filter_depth, filter_height, filter_width, in_channels, - out_channels]`. in_channels must match between input and filter.

              -> Tensor Build t

              output

              Computes a 3-D convolution given 5-D input and filter tensors.

              In signal processing, cross-correlation is a measure of similarity of + the filter input of the convolution.

              conv2DBackpropInput Source #

              Arguments

              :: OneOf '[Word16, Float] t 
              => Tensor v'1 Int32

              input_sizes: An integer vector representing the shape of input, + where input is a 4-D `[batch, height, width, channels]` tensor.

              -> Tensor v'2 t

              filter: 4-D with shape + `[filter_height, filter_width, in_channels, out_channels]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient + w.r.t. the input of the convolution.

              Computes the gradients of convolution with respect to the input.

              conv2DBackpropInput' Source #

              Arguments

              :: OneOf '[Word16, Float] t 
              => OpParams 
              -> Tensor v'1 Int32

              input_sizes: An integer vector representing the shape of input, + where input is a 4-D `[batch, height, width, channels]` tensor.

              -> Tensor v'2 t

              filter: 4-D with shape + `[filter_height, filter_width, in_channels, out_channels]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient + w.r.t. the input of the convolution.

              conv3D Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => Tensor v'1 t

              input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[filter_depth, filter_height, filter_width, in_channels, + out_channels]`. in_channels must match between input and filter.

              -> Tensor Build t

              output

              Computes a 3-D convolution given 5-D input and filter tensors.

              In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This - is also known as a sliding dot product or sliding inner-product.

              Our Conv3D implements a form of cross-correlation.

              conv3D'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[filter_depth, filter_height, filter_width, in_channels, - out_channels]`. in_channels must match between input and filter.

              -> Tensor Build t

              output

              conv3DBackpropFilter

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - out_channels]`.

              -> Tensor Build t

              output

              Computes the gradients of 3-D convolution with respect to the filter.

              conv3DBackpropFilter'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - out_channels]`.

              -> Tensor Build t

              output

              conv3DBackpropFilterV2

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, - where filter is a 5-D + is also known as a sliding dot product or sliding inner-product.

              Our Conv3D implements a form of cross-correlation.

              conv3D' Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[filter_depth, filter_height, filter_width, in_channels, + out_channels]`. in_channels must match between input and filter.

              -> Tensor Build t

              output

              conv3DBackpropFilter Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + out_channels]`.

              -> Tensor Build t

              output

              Computes the gradients of 3-D convolution with respect to the filter.

              conv3DBackpropFilter' Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + out_channels]`.

              -> Tensor Build t

              output

              conv3DBackpropFilterV2 Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, + where filter is a 5-D `[filter_depth, filter_height, filter_width, in_channels, out_channels]` - tensor.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - out_channels]`.

              -> Tensor Build t

              output

              Computes the gradients of 3-D convolution with respect to the filter.

              conv3DBackpropFilterV2'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, - where filter is a 5-D + tensor.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + out_channels]`.

              -> Tensor Build t

              output

              Computes the gradients of 3-D convolution with respect to the filter.

              conv3DBackpropFilterV2' Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, + where filter is a 5-D `[filter_depth, filter_height, filter_width, in_channels, out_channels]` - tensor.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - out_channels]`.

              -> Tensor Build t

              output

              conv3DBackpropInput

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - out_channels]`.

              -> Tensor Build t

              output

              Computes the gradients of 3-D convolution with respect to the input.

              conv3DBackpropInput'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - out_channels]`.

              -> Tensor Build t

              output

              conv3DBackpropInputV2

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 Int32

              input_sizes: An integer vector representing the tensor shape of input, + tensor.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + out_channels]`.

              -> Tensor Build t

              output

              conv3DBackpropInput Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + out_channels]`.

              -> Tensor Build t

              output

              Computes the gradients of 3-D convolution with respect to the input.

              conv3DBackpropInput' Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: Shape `[batch, depth, rows, cols, in_channels]`.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + out_channels]`.

              -> Tensor Build t

              output

              conv3DBackpropInputV2 Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => Tensor v'1 Int32

              input_sizes: An integer vector representing the tensor shape of input, where input is a 5-D - `[batch, depth, rows, cols, in_channels]` tensor.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - out_channels]`.

              -> Tensor Build t

              output

              Computes the gradients of 3-D convolution with respect to the input.

              conv3DBackpropInputV2'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 Int32

              input_sizes: An integer vector representing the tensor shape of input, + `[batch, depth, rows, cols, in_channels]` tensor.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + out_channels]`.

              -> Tensor Build t

              output

              Computes the gradients of 3-D convolution with respect to the input.

              conv3DBackpropInputV2' Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => OpParams 
              -> Tensor v'1 Int32

              input_sizes: An integer vector representing the tensor shape of input, where input is a 5-D - `[batch, depth, rows, cols, in_channels]` tensor.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - out_channels]`.

              -> Tensor Build t

              output

              copy

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              input: Input tensor.

              -> Tensor Build t

              output: Output tensor, deep-copied from input.

              Copy Op.

              Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the - device on which the tensor is allocated.

              Unlike the CopyHost Op, this op does not have HostMemory constraint on its - input or output.

              copy'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              input: Input tensor.

              -> Tensor Build t

              output: Output tensor, deep-copied from input.

              copyHost

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              input: Input tensor.

              -> Tensor Build t

              output: Output tensor, deep-copied from input.

              Copy Host Op.

              Performs CPU-to-CPU deep-copying of tensor.

              Unlike the Copy Op, this op has HostMemory constraint on its input or output.

              copyHost'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              input: Input tensor.

              -> Tensor Build t

              output: Output tensor, deep-copied from input.

              cos

              Arguments

              :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes cos of x element-wise.

              cos'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              countUpTo

              Arguments

              :: (MonadBuild m', OneOf `[Int32, Int64]` t) 
              => Int64

              limit: If incrementing ref would bring it above limit, instead generates an - OutOfRange error.

              -> Tensor Ref t

              ref: Should be from a scalar Variable node.

              -> m' (Tensor Value t)

              output: A copy of the input before increment. If nothing else modifies the - input, the values produced will all be distinct.

              Increments ref until it reaches limit.

              countUpTo'

              Arguments

              :: (MonadBuild m', OneOf `[Int32, Int64]` t) 
              => OpParams 
              -> Int64

              limit: If incrementing ref would bring it above limit, instead generates an - OutOfRange error.

              -> Tensor Ref t

              ref: Should be from a scalar Variable node.

              -> m' (Tensor Value t)

              output: A copy of the input before increment. If nothing else modifies the - input, the values produced will all be distinct.

              cropAndResize

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. - Both image_height and image_width need to be positive.

              -> Tensor v'2 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor + `[batch, depth, rows, cols, in_channels]` tensor.

              -> Tensor v'2 t

              filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + in_channels must match between input and filter.

              -> Tensor v'3 t

              out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + out_channels]`.

              -> Tensor Build t

              output

              cos Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes cos of x element-wise.

              cosh Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes hyperbolic cosine of x element-wise.

              countUpTo Source #

              Arguments

              :: (MonadBuild m', OneOf '[Int32, Int64] t) 
              => Int64

              limit: If incrementing ref would bring it above limit, instead generates an + OutOfRange error.

              -> Tensor Ref t

              ref: Should be from a scalar Variable node.

              -> m' (Tensor Value t)

              output: A copy of the input before increment. If nothing else modifies the + input, the values produced will all be distinct.

              Increments ref until it reaches limit.

              countUpTo' Source #

              Arguments

              :: (MonadBuild m', OneOf '[Int32, Int64] t) 
              => OpParams 
              -> Int64

              limit: If incrementing ref would bring it above limit, instead generates an + OutOfRange error.

              -> Tensor Ref t

              ref: Should be from a scalar Variable node.

              -> m' (Tensor Value t)

              output: A copy of the input before increment. If nothing else modifies the + input, the values produced will all be distinct.

              cropAndResize Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => Tensor v'1 t

              image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + Both image_height and image_width need to be positive.

              -> Tensor v'2 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image height is mapped to - `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in + `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use - extrapolation_value to extrapolate the input image values.

              -> Tensor v'3 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor v'4 Int32

              crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All + extrapolation_value to extrapolate the input image values.

              -> Tensor v'3 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor v'4 Int32

              crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be - positive.

              -> Tensor Build Float

              crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              Extracts crops from the input image tensor and bilinearly resizes them (possibly

              with aspect ratio change) to a common output size specified by crop_size. This + positive.

              -> Tensor Build Float

              crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              Extracts crops from the input image tensor and bilinearly resizes them (possibly

              with aspect ratio change) to a common output size specified by crop_size. This is more general than the crop_to_bounding_box op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change.

              Returns a tensor with crops from the input image at positions defined at the bounding box locations in boxes. The cropped boxes are all resized (with bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The - result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.

              cropAndResize'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. - Both image_height and image_width need to be positive.

              -> Tensor v'2 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor + result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.

              cropAndResize' Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + Both image_height and image_width need to be positive.

              -> Tensor v'2 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image height is mapped to - `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in + `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use - extrapolation_value to extrapolate the input image values.

              -> Tensor v'3 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor v'4 Int32

              crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All + extrapolation_value to extrapolate the input image values.

              -> Tensor v'3 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor v'4 Int32

              crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be - positive.

              -> Tensor Build Float

              crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              cropAndResizeGradBoxes

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 Float

              grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              -> Tensor v'2 t

              image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. - Both image_height and image_width need to be positive.

              -> Tensor v'3 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor + positive.

              -> Tensor Build Float

              crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              cropAndResizeGradBoxes Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => Tensor v'1 Float

              grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              -> Tensor v'2 t

              image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + Both image_height and image_width need to be positive.

              -> Tensor v'3 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so as the @@ -608,9 +677,9 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use - extrapolation_value to extrapolate the input image values.

              -> Tensor v'4 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor Build Float

              output: A 2-D tensor of shape `[num_boxes, 4]`.

              Computes the gradient of the crop_and_resize op wrt the input boxes tensor.

              cropAndResizeGradBoxes'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 Float

              grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              -> Tensor v'2 t

              image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. - Both image_height and image_width need to be positive.

              -> Tensor v'3 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor + extrapolation_value to extrapolate the input image values.

              -> Tensor v'4 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor Build Float

              output: A 2-D tensor of shape `[num_boxes, 4]`.

              Computes the gradient of the crop_and_resize op wrt the input boxes tensor.

              cropAndResizeGradBoxes' Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => OpParams 
              -> Tensor v'1 Float

              grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              -> Tensor v'2 t

              image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + Both image_height and image_width need to be positive.

              -> Tensor v'3 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so as the @@ -619,8 +688,8 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use - extrapolation_value to extrapolate the input image values.

              -> Tensor v'4 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor Build Float

              output: A 2-D tensor of shape `[num_boxes, 4]`.

              cropAndResizeGradImage

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => Tensor v'1 Float

              grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              -> Tensor v'2 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor + extrapolation_value to extrapolate the input image values.

              -> Tensor v'4 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor Build Float

              output: A 2-D tensor of shape `[num_boxes, 4]`.

              cropAndResizeGradImage Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => Tensor v'1 Float

              grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              -> Tensor v'2 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so as the @@ -629,10 +698,10 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use - extrapolation_value to extrapolate the input image values.

              -> Tensor v'3 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor v'4 Int32

              image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` + extrapolation_value to extrapolate the input image values.

              -> Tensor v'3 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor v'4 Int32

              image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` containing the original image size. Both image_height and image_width need - to be positive.

              -> Tensor Build t

              output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.

              Computes the gradient of the crop_and_resize op wrt the input image tensor.

              cropAndResizeGradImage'

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 Float

              grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              -> Tensor v'2 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor + to be positive.

              -> Tensor Build t

              output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.

              Computes the gradient of the crop_and_resize op wrt the input image tensor.

              cropAndResizeGradImage' Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => OpParams 
              -> Tensor v'1 Float

              grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

              -> Tensor v'2 Float

              boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so as the @@ -641,139 +710,111 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use - extrapolation_value to extrapolate the input image values.

              -> Tensor v'3 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor v'4 Int32

              image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` + extrapolation_value to extrapolate the input image values.

              -> Tensor v'3 Int32

              box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + The value of `box_ind[i]` specifies the image that the i-th box refers to.

              -> Tensor v'4 Int32

              image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` containing the original image size. Both image_height and image_width need - to be positive.

              -> Tensor Build t

              output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.

              cross

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              a: A tensor containing 3-element vectors.

              -> Tensor v'2 t

              b: Another tensor, of same type and shape as a.

              -> Tensor Build t

              product: Pairwise cross product of the vectors in a and b.

              Compute the pairwise cross product.

              a and b must be the same shape; they can either be simple 3-element vectors, + to be positive.

              -> Tensor Build t

              output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.

              cross Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => Tensor v'1 t

              a: A tensor containing 3-element vectors.

              -> Tensor v'2 t

              b: Another tensor, of same type and shape as a.

              -> Tensor Build t

              product: Pairwise cross product of the vectors in a and b.

              Compute the pairwise cross product.

              a and b must be the same shape; they can either be simple 3-element vectors, or any shape where the innermost dimension is 3. In the latter case, each pair - of corresponding 3-element vectors is cross-multiplied independently.

              cross'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              a: A tensor containing 3-element vectors.

              -> Tensor v'2 t

              b: Another tensor, of same type and shape as a.

              -> Tensor Build t

              product: Pairwise cross product of the vectors in a and b.

              cumprod

              Arguments

              :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
              => Tensor v'1 t

              x

              -> Tensor v'2 tidx

              axis

              -> Tensor Build t

              out

              Compute the cumulative product of the tensor x along axis.

              By default, this op performs an inclusive cumprod, which means that the first - element of the input is identical to the first element of the output: - ```prettyprint - tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c] - ```

              By setting the exclusive kwarg to True, an exclusive cumprod is - performed instead: - ```prettyprint - tf.cumprod([a, b, c], exclusive=True) ==> [0, a, a * b] - ```

              By setting the reverse kwarg to True, the cumprod is performed in the - opposite direction: - ```prettyprint - tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c] - ``` - This is more efficient than using separate `tf.reverse` ops.

              The reverse and exclusive kwargs can also be combined: - ```prettyprint - tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 0] - ```

              cumprod'

              Arguments

              :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor v'2 tidx

              axis

              -> Tensor Build t

              out

              cumsum

              Arguments

              :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
              => Tensor v'1 t

              x

              -> Tensor v'2 tidx

              axis

              -> Tensor Build t

              out

              Compute the cumulative sum of the tensor x along axis.

              By default, this op performs an inclusive cumsum, which means that the first - element of the input is identical to the first element of the output: - ```prettyprint - tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c] - ```

              By setting the exclusive kwarg to True, an exclusive cumsum is - performed instead: - ```prettyprint - tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b] - ```

              By setting the reverse kwarg to True, the cumsum is performed in the - opposite direction: - ```prettyprint - tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c] - ``` - This is more efficient than using separate `tf.reverse` ops.

              The reverse and exclusive kwargs can also be combined: - ```prettyprint - tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0] - ```

              cumsum'

              Arguments

              :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor v'2 tidx

              axis

              -> Tensor Build t

              out

              debugIdentity

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              input: Input tensor, non-Reference type.

              -> Tensor Build t

              output: Output tensor that equals the input tensor.

              Debug Identity Op.

              Provides an identity mapping of the non-Ref type input tensor for debugging.

              debugIdentity'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              input: Input tensor, non-Reference type.

              -> Tensor Build t

              output: Output tensor that equals the input tensor.

              debugNanCount

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              input: Input tensor, non-Reference type.

              -> Tensor Build Int64

              output: An integer output tensor that is the number of NaNs in the input.

              Debug NaN Value Counter Op

              Counts number of NaNs in the input tensor, for debugging.

              debugNanCount'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              input: Input tensor, non-Reference type.

              -> Tensor Build Int64

              output: An integer output tensor that is the number of NaNs in the input.

              debugNumericSummary

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              input: Input tensor, non-Reference type, float or double.

              -> Tensor Build Double

              output: A double tensor of shape [12], the elements of which are: - [0]: is initialized (1.0) or not (0.0). - [1]: total number of elements - [2]: -inf count - [3]: negative element count (excluding -inf) - [4]: zero element count - [5]: positive element count (excluding +inf) - [6]: +inf element count - [7]: NaN element count - Output elements [1:8] are all zero, if the tensor is uninitialized. - [8]: minimum of all non-inf and non-NaN elements. - If uninitialized or no such element exists: +inf. - [9]: maximum of all non-inf and non-NaN elements. - If uninitialized or no such element exists: -inf. - [10]: mean of all non-inf and non-NaN elements. - If uninitialized or no such element exists: NaN. - [11]: variance of all non-inf and non-NaN elements. - If uninitialized or no such element exists: NaN.

              Debug Numeric Summary Op.

              Provide a basic summary of numeric value types, range and distribution.

              debugNumericSummary'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              input: Input tensor, non-Reference type, float or double.

              -> Tensor Build Double

              output: A double tensor of shape [12], the elements of which are: - [0]: is initialized (1.0) or not (0.0). - [1]: total number of elements - [2]: -inf count - [3]: negative element count (excluding -inf) - [4]: zero element count - [5]: positive element count (excluding +inf) - [6]: +inf element count - [7]: NaN element count - Output elements [1:8] are all zero, if the tensor is uninitialized. - [8]: minimum of all non-inf and non-NaN elements. - If uninitialized or no such element exists: +inf. - [9]: maximum of all non-inf and non-NaN elements. - If uninitialized or no such element exists: -inf. - [10]: mean of all non-inf and non-NaN elements. - If uninitialized or no such element exists: NaN. - [11]: variance of all non-inf and non-NaN elements. - If uninitialized or no such element exists: NaN.

              decodeBase64

              Arguments

              :: Tensor v'1 ByteString

              input: Base64 strings to decode.

              -> Tensor Build ByteString

              output: Decoded strings.

              Decode web-safe base64-encoded strings.

              Input may or may not have padding at the end. See EncodeBase64 for padding. - Web-safe means that input must use - and _ instead of + and /.

              decodeBase64'

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              input: Base64 strings to decode.

              -> Tensor Build ByteString

              output: Decoded strings.

              decodeCSV

              Arguments

              :: OneOfs `[ByteString, Int32, Int64, Float]` oUT_TYPE 
              => Tensor v'1 ByteString

              records: Each string is a record/row in the csv and all records should have - the same format.

              -> TensorList v'2 oUT_TYPE

              record_defaults: One tensor per column of the input record, with either a - scalar default value for that column or empty if the column is required.

              -> TensorList Build oUT_TYPE

              output: Each tensor will have the same shape as records.

              Convert CSV records to tensors. Each column maps to one tensor.

              RFC 4180 format is expected for the CSV records. + of corresponding 3-element vectors is cross-multiplied independently.

              cross' Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              a: A tensor containing 3-element vectors.

              -> Tensor v'2 t

              b: Another tensor, of same type and shape as a.

              -> Tensor Build t

              product: Pairwise cross product of the vectors in a and b.

              cumprod Source #

              Arguments

              :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
              => Tensor v'1 t

              x

              -> Tensor v'2 tidx

              axis

              -> Tensor Build t

              out

              Compute the cumulative product of the tensor x along axis.

              By default, this op performs an inclusive cumprod, which means that the first + element of the input is identical to the first element of the output:

              ```python + tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + ```

              By setting the exclusive kwarg to True, an exclusive cumprod is + performed instead:

              ```python + tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + ```

              By setting the reverse kwarg to True, the cumprod is performed in the + opposite direction:

              ```python + tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] + ```

              This is more efficient than using separate `tf.reverse` ops.

              The reverse and exclusive kwargs can also be combined:

              ```python + tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] + ```

              cumprod' Source #

              Arguments

              :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor v'2 tidx

              axis

              -> Tensor Build t

              out

              cumsum Source #

              Arguments

              :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
              => Tensor v'1 t

              x

              -> Tensor v'2 tidx

              axis

              -> Tensor Build t

              out

              Compute the cumulative sum of the tensor x along axis.

              By default, this op performs an inclusive cumsum, which means that the first + element of the input is identical to the first element of the output:

              ```python + tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + ```

              By setting the exclusive kwarg to True, an exclusive cumsum is + performed instead:

              ```python + tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + ```

              By setting the reverse kwarg to True, the cumsum is performed in the + opposite direction:

              ```python + tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + ```

              This is more efficient than using separate `tf.reverse` ops.

              The reverse and exclusive kwargs can also be combined:

              ```python + tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] + ```

              cumsum' Source #

              Arguments

              :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor v'2 tidx

              axis

              -> Tensor Build t

              out

              debugGradientIdentity Source #

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              input

              -> Tensor Build t

              output

              Identity op for gradient debugging.

              This op is hidden from public in Python. It is used by TensorFlow Debugger to + register gradient tensors for gradient debugging.

              debugGradientIdentity' Source #

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor Build t

              output

              decodeBase64 Source #

              Arguments

              :: Tensor v'1 ByteString

              input: Base64 strings to decode.

              -> Tensor Build ByteString

              output: Decoded strings.

              Decode web-safe base64-encoded strings.

              Input may or may not have padding at the end. See EncodeBase64 for padding. + Web-safe means that input must use - and _ instead of + and /.

              decodeBase64' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              input: Base64 strings to decode.

              -> Tensor Build ByteString

              output: Decoded strings.

              decodeBmp Source #

              Arguments

              :: Tensor v'1 ByteString

              contents: 0-D. The BMP-encoded image.

              -> Tensor Build Word8

              image: 3-D with shape `[height, width, channels]`. RGB order

              Decode the first frame of a BMP-encoded image to a uint8 tensor.

              The attr channels indicates the desired number of color channels for the + decoded image.

              Accepted values are:

              • 0: Use the number of channels in the BMP-encoded image.
              • 3: output an RGB image.
              • 4: output an RGBA image.

              decodeBmp' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              contents: 0-D. The BMP-encoded image.

              -> Tensor Build Word8

              image: 3-D with shape `[height, width, channels]`. RGB order

              decodeCSV Source #

              Arguments

              :: OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE 
              => Tensor v'1 ByteString

              records: Each string is a record/row in the csv and all records should have + the same format.

              -> TensorList v'2 oUT_TYPE

              record_defaults: One tensor per column of the input record, with either a + scalar default value for that column or empty if the column is required.

              -> TensorList Build oUT_TYPE

              output: Each tensor will have the same shape as records.

              Convert CSV records to tensors. Each column maps to one tensor.

              RFC 4180 format is expected for the CSV records. (https:/tools.ietf.orghtml/rfc4180) - Note that we allow leading and trailing spaces with int or float field.

              decodeCSV'

              Arguments

              :: OneOfs `[ByteString, Int32, Int64, Float]` oUT_TYPE 
              => OpParams 
              -> Tensor v'1 ByteString

              records: Each string is a record/row in the csv and all records should have - the same format.

              -> TensorList v'2 oUT_TYPE

              record_defaults: One tensor per column of the input record, with either a - scalar default value for that column or empty if the column is required.

              -> TensorList Build oUT_TYPE

              output: Each tensor will have the same shape as records.

              decodeGif

              Arguments

              :: Tensor v'1 ByteString

              contents: 0-D. The GIF-encoded image.

              -> Tensor Build Word8

              image: 4-D with shape `[num_frames, height, width, 3]`. RGB order

              Decode the first frame of a GIF-encoded image to a uint8 tensor.

              GIF with frame or transparency compression are not supported - convert animated GIF from compressed to uncompressed by:

              convert $src.gif -coalesce $dst.gif

              decodeGif'

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              contents: 0-D. The GIF-encoded image.

              -> Tensor Build Word8

              image: 4-D with shape `[num_frames, height, width, 3]`. RGB order

              decodeJSONExample

              Arguments

              :: Tensor v'1 ByteString

              json_examples: Each string is a JSON object serialized according to the JSON - mapping of the Example proto.

              -> Tensor Build ByteString

              binary_examples: Each string is a binary Example protocol buffer corresponding + Note that we allow leading and trailing spaces with int or float field.

              decodeCSV' Source #

              Arguments

              :: OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE 
              => OpParams 
              -> Tensor v'1 ByteString

              records: Each string is a record/row in the csv and all records should have the same format.

              -> TensorList v'2 oUT_TYPE

              record_defaults: One tensor per column of the input record, with either a scalar default value for that column or empty if the column is required.

              -> TensorList Build oUT_TYPE

              output: Each tensor will have the same shape as records.

              decodeGif Source #

              Arguments

              :: Tensor v'1 ByteString

              contents: 0-D. The GIF-encoded image.

              -> Tensor Build Word8

              image: 4-D with shape `[num_frames, height, width, 3]`. RGB order

              Decode the first frame of a GIF-encoded image to a uint8 tensor.

              GIF with frame or transparency compression are not supported; convert animated GIF from compressed to uncompressed by:

              convert $src.gif -coalesce $dst.gif

              This op also supports decoding JPEGs and PNGs, though it is cleaner to use `tf.image.decode_image`.

              decodeGif' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              contents: 0-D. The GIF-encoded image.

              -> Tensor Build Word8

              image: 4-D with shape `[num_frames, height, width, 3]`. RGB order

              decodeJSONExample Source #

              Arguments

              :: Tensor v'1 ByteString

              json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto.

              -> Tensor Build ByteString

              binary_examples: Each string is a binary Example protocol buffer corresponding to the respective element of json_examples.

              Convert JSON-encoded Example records to binary protocol buffer strings.

              This op translates a tensor containing Example records, encoded using the standard JSON mapping, into a tensor containing the same records encoded as binary protocol buffers. The resulting tensor can then be fed to any of the other - Example-parsing ops.

              decodeJSONExample'

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto.

              -> Tensor Build ByteString

              binary_examples: Each string is a binary Example protocol buffer corresponding - to the respective element of json_examples.

              decodeJpeg

              Arguments

              :: Tensor v'1 ByteString

              contents: 0-D. The JPEG-encoded image.

              -> Tensor Build Word8

              image: 3-D with shape `[height, width, channels]`..

              Decode a JPEG-encoded image to a uint8 tensor.

              The attr channels indicates the desired number of color channels for the + Example-parsing ops.

              decodeJSONExample' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto.

              -> Tensor Build ByteString

              binary_examples: Each string is a binary Example protocol buffer corresponding + to the respective element of json_examples.

              decodeJpeg Source #

              Arguments

              :: Tensor v'1 ByteString

              contents: 0-D. The JPEG-encoded image.

              -> Tensor Build Word8

              image: 3-D with shape `[height, width, channels]`..

              Decode a JPEG-encoded image to a uint8 tensor.

              The attr channels indicates the desired number of color channels for the decoded image.

              Accepted values are:

              • 0: Use the number of channels in the JPEG-encoded image.
              • 1: output a grayscale image.
              • 3: output an RGB image.

              If needed, the JPEG-encoded image is transformed to match the requested number of color channels.

              The attr ratio allows downscaling the image by an integer factor during decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than downscaling the image later.

              decodeJpeg'

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              contents: 0-D. The JPEG-encoded image.

              -> Tensor Build Word8

              image: 3-D with shape `[height, width, channels]`..

              decodePng

              Arguments

              :: OneOf `[Word16, Word8]` dtype 
              => Tensor v'1 ByteString

              contents: 0-D. The PNG-encoded image.

              -> Tensor Build dtype

              image: 3-D with shape `[height, width, channels]`.

              Decode a PNG-encoded image to a uint8 or uint16 tensor.

              The attr channels indicates the desired number of color channels for the + downscaling the image later.

              This op also supports decoding PNGs and non-animated GIFs since the interface is the same, though it is cleaner to use `tf.image.decode_image`.

              decodeJpeg' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              contents: 0-D. The JPEG-encoded image.

              -> Tensor Build Word8

              image: 3-D with shape `[height, width, channels]`..

              decodePng Source #

              Arguments

              :: OneOf '[Word16, Word8] dtype 
              => Tensor v'1 ByteString

              contents: 0-D. The PNG-encoded image.

              -> Tensor Build dtype

              image: 3-D with shape `[height, width, channels]`.

              Decode a PNG-encoded image to a uint8 or uint16 tensor.

              The attr channels indicates the desired number of color channels for the decoded image.

              Accepted values are:

              • 0: Use the number of channels in the PNG-encoded image.
              • 1: output a grayscale image.
              • 3: output an RGB image.
              • 4: output an RGBA image.

              If needed, the PNG-encoded image is transformed to match the requested number of color channels.

              decodePng'

              Arguments

              :: OneOf `[Word16, Word8]` dtype 
              => OpParams 
              -> Tensor v'1 ByteString

              contents: 0-D. The PNG-encoded image.

              -> Tensor Build dtype

              image: 3-D with shape `[height, width, channels]`.

              decodeRaw

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type 
              => Tensor v'1 ByteString

              bytes: All the elements must have the same length.

              -> Tensor Build out_type

              output: A Tensor with one more dimension than the input bytes. The + of color channels.

              This op also supports decoding JPEGs and non-animated GIFs since the interface is the same, though it is cleaner to use `tf.image.decode_image`.

              decodePng' Source #

              Arguments

              :: OneOf '[Word16, Word8] dtype 
              => OpParams 
              -> Tensor v'1 ByteString

              contents: 0-D. The PNG-encoded image.

              -> Tensor Build dtype

              image: 3-D with shape `[height, width, channels]`.

              decodeRaw Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type 
              => Tensor v'1 ByteString

              bytes: All the elements must have the same length.

              -> Tensor Build out_type

              output: A Tensor with one more dimension than the input bytes. The added dimension will have size equal to the length of the elements of bytes divided by the number of bytes to represent out_type.

              Reinterpret the bytes of a string as a vector of numbers.

              decodeRaw'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type 
              => OpParams 
              -> Tensor v'1 ByteString

              bytes: All the elements must have the same length.

              -> Tensor Build out_type

              output: A Tensor with one more dimension than the input bytes. The added dimension will have size equal to the length of the elements of bytes divided by the number of bytes to represent out_type.

              Reinterpret the bytes of a string as a vector of numbers.

              decodeRaw' Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type 
              => OpParams 
              -> Tensor v'1 ByteString

              bytes: All the elements must have the same length.

              -> Tensor Build out_type

              output: A Tensor with one more dimension than the input bytes. The added dimension will have size equal to the length of the elements of bytes divided by the number of bytes to represent out_type.

              deleteSessionTensor

              Arguments

              :: MonadBuild m' 
              => Tensor v'1 ByteString

              handle: The handle for a tensor stored in the session state.

              -> m' ControlNode 

              Delete the tensor specified by its handle in the session.

              deleteSessionTensor'

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Tensor v'1 ByteString

              handle: The handle for a tensor stored in the session state.

              -> m' ControlNode 

              denseToDenseSetOperation

              Arguments

              :: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
              => Tensor v'1 t

              set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. - Dimension n contains values in a set, duplicates are allowed but ignored.

              -> Tensor v'2 t

              set2: Tensor with rank n. 1st `n-1` dimensions must be the same as set1. - Dimension n contains values in a set, duplicates are allowed but ignored.

              -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

              (result_indices, result_values, result_shape)

              • result_indices: 2D indices of a SparseTensor.
              • result_values: 1D values of a SparseTensor.
              • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + of bytes divided by the number of bytes to represent out_type.

              decodeWav Source #

              Arguments

              :: Tensor v'1 ByteString

              contents: The WAV-encoded audio, usually from a file.

              -> (Tensor Build Float, Tensor Build Int32)

              (audio, sample_rate)

              • audio: 2-D with shape `[length, channels]`.
              • sample_rate: Scalar holding the sample rate found in the WAV header.

              Decode a 16-bit PCM WAV file to a float tensor.

              The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.

              When desired_channels is set, if the input contains fewer channels than this then the last channel will be duplicated to give the requested number, else if the input has more channels than requested then the additional channels will be ignored.

              If desired_samples is set, then the audio will be cropped or padded with zeroes to the requested length.

              The first output contains a Tensor with the content of the audio samples. The lowest dimension will be the number of channels, and the second will be the number of samples. For example, a ten-sample-long stereo WAV file should give an output shape of [10, 2].

              decodeWav' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              contents: The WAV-encoded audio, usually from a file.

              -> (Tensor Build Float, Tensor Build Int32)

              (audio, sample_rate)

              • audio: 2-D with shape `[length, channels]`.
              • sample_rate: Scalar holding the sample rate found in the WAV header.

              deleteSessionTensor Source #

              Arguments

              :: MonadBuild m' 
              => Tensor v'1 ByteString

              handle: The handle for a tensor stored in the session state.

              -> m' ControlNode 

              Delete the tensor specified by its handle in the session.

              deleteSessionTensor' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Tensor v'1 ByteString

              handle: The handle for a tensor stored in the session state.

              -> m' ControlNode 

              denseToDenseSetOperation Source #

              Arguments

              :: OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t 
              => Tensor v'1 t

              set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. + Dimension n contains values in a set, duplicates are allowed but ignored.

              -> Tensor v'2 t

              set2: Tensor with rank n. 1st `n-1` dimensions must be the same as set1. + Dimension n contains values in a set, duplicates are allowed but ignored.

              -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

              (result_indices, result_values, result_shape)

              • result_indices: 2D indices of a SparseTensor.
              • result_values: 1D values of a SparseTensor.
              • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` - is the max result set size across all `0...n-1` dimensions.

              Applies set operation along last dimension of 2 Tensor inputs.

              See SetOperationOp::SetOperationFromContext for values of set_operation.

              Output result is a SparseTensor represented by result_indices, + is the max result set size across all `0...n-1` dimensions.

              Applies set operation along last dimension of 2 Tensor inputs.

              See SetOperationOp::SetOperationFromContext for values of set_operation.

              Output result is a SparseTensor represented by result_indices, result_values, and result_shape. For set1 and set2 ranked n, this has rank n and the same 1st `n-1` dimensions as set1 and set2. The nth dimension contains the result of set_operation applied to the corresponding `[0...n-1]` dimension of set.

              denseToDenseSetOperation'

              Arguments

              :: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
              => OpParams 
              -> Tensor v'1 t

              set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. - Dimension n contains values in a set, duplicates are allowed but ignored.

              -> Tensor v'2 t

              set2: Tensor with rank n. 1st `n-1` dimensions must be the same as set1. - Dimension n contains values in a set, duplicates are allowed but ignored.

              -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

              (result_indices, result_values, result_shape)

              • result_indices: 2D indices of a SparseTensor.
              • result_values: 1D values of a SparseTensor.
              • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + `[0...n-1]` dimension of set.

                denseToDenseSetOperation' Source #

                Arguments

                :: OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t 
                => OpParams 
                -> Tensor v'1 t

                set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. + Dimension n contains values in a set, duplicates are allowed but ignored.

                -> Tensor v'2 t

                set2: Tensor with rank n. 1st `n-1` dimensions must be the same as set1. + Dimension n contains values in a set, duplicates are allowed but ignored.

                -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                (result_indices, result_values, result_shape)

                • result_indices: 2D indices of a SparseTensor.
                • result_values: 1D values of a SparseTensor.
                • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` - is the max result set size across all `0...n-1` dimensions.

                denseToSparseSetOperation

                Arguments

                :: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
                => Tensor v'1 t

                set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. - Dimension n contains values in a set, duplicates are allowed but ignored.

                -> Tensor v'2 Int64

                set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major - order.

                -> Tensor v'3 t

                set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major - order.

                -> Tensor v'4 Int64

                set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must + is the max result set size across all `0...n-1` dimensions.

                denseToSparseBatchDataset Source #

                Arguments

                :: MonadBuild m' 
                => [DataType]

                output_types

                -> Tensor v'1 ResourceHandle

                input_dataset: A handle to an input dataset. Must have a single component.

                -> Tensor v'2 Int64

                batch_size: A scalar representing the number of elements to accumulate in a + batch.

                -> Tensor v'3 Int64

                row_shape: A vector representing the dense shape of each row in the produced + SparseTensor.

                -> m' (Tensor Value ResourceHandle)

                handle

                Creates a dataset that yields a SparseTensor for each element of the input.

                denseToSparseBatchDataset' Source #

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> [DataType]

                output_types

                -> Tensor v'1 ResourceHandle

                input_dataset: A handle to an input dataset. Must have a single component.

                -> Tensor v'2 Int64

                batch_size: A scalar representing the number of elements to accumulate in a + batch.

                -> Tensor v'3 Int64

                row_shape: A vector representing the dense shape of each row in the produced + SparseTensor.

                -> m' (Tensor Value ResourceHandle)

                handle

                denseToSparseSetOperation Source #

                Arguments

                :: OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t 
                => Tensor v'1 t

                set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. + Dimension n contains values in a set, duplicates are allowed but ignored.

                -> Tensor v'2 Int64

                set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major + order.

                -> Tensor v'3 t

                set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major + order.

                -> Tensor v'4 Int64

                set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must be the same as the 1st `n-1` dimensions of set1, `result_shape[n]` is the - max set size across `n-1` dimensions.

                -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                (result_indices, result_values, result_shape)

                • result_indices: 2D indices of a SparseTensor.
                • result_values: 1D values of a SparseTensor.
                • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + max set size across `n-1` dimensions.

                -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                (result_indices, result_values, result_shape)

                • result_indices: 2D indices of a SparseTensor.
                • result_values: 1D values of a SparseTensor.
                • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` - is the max result set size across all `0...n-1` dimensions.

                Applies set operation along last dimension of Tensor and SparseTensor.

                See SetOperationOp::SetOperationFromContext for values of set_operation.

                Input set2 is a SparseTensor represented by set2_indices, set2_values, + is the max result set size across all `0...n-1` dimensions.

              Applies set operation along last dimension of Tensor and SparseTensor.

              See SetOperationOp::SetOperationFromContext for values of set_operation.

              Input set2 is a SparseTensor represented by set2_indices, set2_values, and set2_shape. For set2 ranked n, 1st `n-1` dimensions must be the same as set1. Dimension n contains values in a set, duplicates are allowed but - ignored.

              If validate_indices is True, this op validates the order and range of set2 + ignored.

              If validate_indices is True, this op validates the order and range of set2 indices.

              Output result is a SparseTensor represented by result_indices, result_values, and result_shape. For set1 and set2 ranked n, this has rank n and the same 1st `n-1` dimensions as set1 and set2. The nth dimension contains the result of set_operation applied to the corresponding `[0...n-1]` dimension of set.

              denseToSparseSetOperation'

              Arguments

              :: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
              => OpParams 
              -> Tensor v'1 t

              set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. - Dimension n contains values in a set, duplicates are allowed but ignored.

              -> Tensor v'2 Int64

              set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major - order.

              -> Tensor v'3 t

              set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major - order.

              -> Tensor v'4 Int64

              set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must + `[0...n-1]` dimension of set.

              denseToSparseSetOperation' Source #

              Arguments

              :: OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t 
              => OpParams 
              -> Tensor v'1 t

              set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. + Dimension n contains values in a set, duplicates are allowed but ignored.

              -> Tensor v'2 Int64

              set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major + order.

              -> Tensor v'3 t

              set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major + order.

              -> Tensor v'4 Int64

              set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must be the same as the 1st `n-1` dimensions of set1, `result_shape[n]` is the - max set size across `n-1` dimensions.

              -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

              (result_indices, result_values, result_shape)

              • result_indices: 2D indices of a SparseTensor.
              • result_values: 1D values of a SparseTensor.
              • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + max set size across `n-1` dimensions.

              -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

              (result_indices, result_values, result_shape)

              • result_indices: 2D indices of a SparseTensor.
              • result_values: 1D values of a SparseTensor.
              • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` - is the max result set size across all `0...n-1` dimensions.

              depthToSpace

              Arguments

              :: TensorType t 
              => Int64

              block_size: The size of the spatial block, same as in Space2Depth.

              -> Tensor v'1 t

              input

              -> Tensor Build t

              output

              DepthToSpace for tensors of type T.

              Rearranges data from depth into blocks of spatial data. + is the max result set size across all `0...n-1` dimensions.

              depthToSpace Source #

              Arguments

              :: TensorType t 
              => Int64

              block_size: The size of the spatial block, same as in Space2Depth.

              -> Tensor v'1 t

              input

              -> Tensor Build t

              output

              DepthToSpace for tensors of type T.

              Rearranges data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. @@ -787,57 +828,77 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core block_size be >=1 and that `block_size * block_size` be a divisor of the input depth.

              This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training - purely convolutional models.

              For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:

              ```prettyprint - x = [[[[1, 2, 3, 4]]]]

              ```

              This operation will output a tensor of shape `[1, 2, 2, 1]`:

              ```prettyprint + purely convolutional models.

              For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:

              ``` + x = [[[[1, 2, 3, 4]]]]

              ```

              This operation will output a tensor of shape `[1, 2, 2, 1]`:

              ``` [[[[1], [2]], [[3], [4]]]] ```

              Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, the corresponding output will have 2x2 elements and will have a depth of 1 channel (1 = `4 / (block_size * block_size)`). - The output element shape is `[2, 2, 1]`.

              For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

              ```prettyprint + The output element shape is `[2, 2, 1]`.

              For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

              ``` x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ```

              This operation, for block size of 2, will return the following tensor of shape - `[1, 2, 2, 3]`

              ```prettyprint + `[1, 2, 2, 3]`

              ``` [[[[1, 2, 3], [4, 5, 6]], - [[7, 8, 9], [10, 11, 12]]]]

              ```

              Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:

              ```prettyprint + [[7, 8, 9], [10, 11, 12]]]]

              ```

              Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:

              ``` x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]] - ```

              the operator will return the following tensor of shape `[1 4 4 1]`:

              ```prettyprint + ```

              the operator will return the following tensor of shape `[1 4 4 1]`:

              ``` x = [[ [1], [2], [5], [6]], [ [3], [4], [7], [8]], [ [9], [10], [13], [14]], - [ [11], [12], [15], [16]]]

              ```

              depthToSpace'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Int64

              block_size: The size of the spatial block, same as in Space2Depth.

              -> Tensor v'1 t

              input

              -> Tensor Build t

              output

              depthwiseConv2dNative

              Arguments

              :: OneOf `[Double, Float]` t 
              => Tensor v'1 t

              input

              -> Tensor v'2 t

              filter

              -> Tensor Build t

              output

              Computes a 2-D depthwise convolution given 4-D input and filter tensors.

              Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + [ [11], [12], [15], [16]]]

              ```

              depthToSpace' Source #

              Arguments

              :: TensorType t 
              => OpParams 
              -> Int64

              block_size: The size of the spatial block, same as in Space2Depth.

              -> Tensor v'1 t

              input

              -> Tensor Build t

              output

              depthwiseConv2dNative Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => Tensor v'1 t

              input

              -> Tensor v'2 t

              filter

              -> Tensor Build t

              output

              Computes a 2-D depthwise convolution given 4-D input and filter tensors.

              Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing in_channels convolutional filters of depth 1, depthwise_conv2d applies a different filter to each input channel (expanding from 1 channel to channel_multiplier channels for each), then concatenates the results - together. Thus, the output has `in_channels * channel_multiplier` channels.

              for k in 0..in_channels-1 + together. Thus, the output has `in_channels * channel_multiplier` channels.

              ``` + for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * - filter[di, dj, k, q]

              Must have `strides[0] = strides[3] = 1`. For the most common case of the same - horizontal and vertices strides, `strides = [1, stride, stride, 1]`.

              depthwiseConv2dNative'

              Arguments

              :: OneOf `[Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor v'2 t

              filter

              -> Tensor Build t

              output

              depthwiseConv2dNativeBackpropFilter

              Arguments

              :: OneOf `[Double, Float]` t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, - where filter is a 4-D - `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape + filter[di, dj, k, q] + ```

              Must have `strides[0] = strides[3] = 1`. For the most common case of the same + horizontal and vertices strides, `strides = [1, stride, stride, 1]`.

              depthwiseConv2dNative' Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor v'2 t

              filter

              -> Tensor Build t

              output

              depthwiseConv2dNativeBackpropFilter Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => Tensor v'1 t

              input: 4-D with shape based on data_format. For example, if + data_format is NHWC then input is a 4-D `[batch, in_height, + in_width, in_channels]` tensor.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, + where filter is a 4-D + `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.

              -> Tensor v'3 t

              out_backprop: 4-D with shape based on data_format. + For example, if data_format is NHWC then + out_backprop shape is `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. - the filter input of the convolution.

              Computes the gradients of depthwise convolution with respect to the filter.

              depthwiseConv2dNativeBackpropFilter'

              Arguments

              :: OneOf `[Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, - where filter is a 4-D - `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape + the filter input of the convolution.

              Computes the gradients of depthwise convolution with respect to the filter.

              depthwiseConv2dNativeBackpropFilter' Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape based on data_format. For example, if + data_format is NHWC then input is a 4-D `[batch, in_height, + in_width, in_channels]` tensor.

              -> Tensor v'2 Int32

              filter_sizes: An integer vector representing the tensor shape of filter, + where filter is a 4-D + `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.

              -> Tensor v'3 t

              out_backprop: 4-D with shape based on data_format. + For example, if data_format is NHWC then + out_backprop shape is `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. - the filter input of the convolution.

              depthwiseConv2dNativeBackpropInput

              Arguments

              :: OneOf `[Double, Float]` t 
              => Tensor v'1 Int32

              input_sizes: An integer vector representing the shape of input, - where input is a 4-D `[batch, height, width, channels]` tensor.

              -> Tensor v'2 t

              filter: 4-D with shape - `[filter_height, filter_width, in_channels, depthwise_multiplier]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient - w.r.t. the input of the convolution.

              Computes the gradients of depthwise convolution with respect to the input.

              depthwiseConv2dNativeBackpropInput'

              Arguments

              :: OneOf `[Double, Float]` t 
              => OpParams 
              -> Tensor v'1 Int32

              input_sizes: An integer vector representing the shape of input, - where input is a 4-D `[batch, height, width, channels]` tensor.

              -> Tensor v'2 t

              filter: 4-D with shape - `[filter_height, filter_width, in_channels, depthwise_multiplier]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient - w.r.t. the input of the convolution.

              dequantize

              Arguments

              :: OneOf `[Int16, Int32, Word16, Word8]` t 
              => Tensor v'1 t

              input

              -> Tensor v'2 Float

              min_range: The minimum scalar value possibly produced for the input.

              -> Tensor v'3 Float

              max_range: The maximum scalar value possibly produced for the input.

              -> Tensor Build Float

              output

              Dequantize the input tensor into a float Tensor.

              min_range, max_range
              are scalar floats that specify the range for + the filter input of the convolution.

              depthwiseConv2dNativeBackpropInput Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => Tensor v'1 Int32

              input_sizes: An integer vector representing the shape of input, based + on data_format. For example, if data_format is NHWC then + input is a 4-D `[batch, height, width, channels]` tensor.

              -> Tensor v'2 t

              filter: 4-D with shape + `[filter_height, filter_width, in_channels, depthwise_multiplier]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape based on data_format. + For example, if data_format is NHWC then + out_backprop shape is `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape according to data_format. For example, if + data_format is NHWC, output shape is `[batch, in_height, + in_width, in_channels]`. Gradient w.r.t. the input of the + convolution.

              Computes the gradients of depthwise convolution with respect to the input.

              depthwiseConv2dNativeBackpropInput' Source #

              Arguments

              :: OneOf '[Double, Float] t 
              => OpParams 
              -> Tensor v'1 Int32

              input_sizes: An integer vector representing the shape of input, based + on data_format. For example, if data_format is NHWC then + input is a 4-D `[batch, height, width, channels]` tensor.

              -> Tensor v'2 t

              filter: 4-D with shape + `[filter_height, filter_width, in_channels, depthwise_multiplier]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape based on data_format. + For example, if data_format is NHWC then + out_backprop shape is `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

              -> Tensor Build t

              output: 4-D with shape according to data_format. For example, if + data_format is NHWC, output shape is `[batch, in_height, + in_width, in_channels]`. Gradient w.r.t. the input of the + convolution.

              dequantize Source #

              Arguments

              :: OneOf '[Int16, Int32, Word16, Word8] t 
              => Tensor v'1 t

              input

              -> Tensor v'2 Float

              min_range: The minimum scalar value possibly produced for the input.

              -> Tensor v'3 Float

              max_range: The maximum scalar value possibly produced for the input.

              -> Tensor Build Float

              output

              Dequantize the input tensor into a float Tensor.

              min_range, max_range
              are scalar floats that specify the range for the input data. The mode attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.

              In MIN_COMBINED mode, each value of the tensor will undergo the following:

              ``` if T == qint8, in[i] += (range(T) + 1)/ 2.0 @@ -849,15 +910,15 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core Dequantize on quint8 will take each value, cast to float, and multiply by 6 / 255. Note that if quantizedtype is qint8, the operation will additionally add - each value by 128 prior to casting.

              If the mode is MIN_FIRST, then this approach is used:

              ``` + each value by 128 prior to casting.

              If the mode is MIN_FIRST, then this approach is used:

              ```c++ number_of_steps = 1 << (# of bits in T) range_adjust = number_of_steps / (number_of_steps - 1) range = (range_max - range_min) * range_adjust range_scale = range / number_of_steps const double offset_input = static_castdouble(input) - lowest_quantized; result = range_min + ((input - numeric_limitsT::min()) * range_scale) - ```

              dequantize'

              Arguments

              :: OneOf `[Int16, Int32, Word16, Word8]` t 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor v'2 Float

              min_range: The minimum scalar value possibly produced for the input.

              -> Tensor v'3 Float

              max_range: The maximum scalar value possibly produced for the input.

              -> Tensor Build Float

              output

              deserializeManySparse

              Arguments

              :: TensorType dtype 
              => Tensor v'1 ByteString

              serialized_sparse: 2-D, The N serialized SparseTensor objects. - Must have 3 columns.

              -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)

              (sparse_indices, sparse_values, sparse_shape)

              • sparse_indices
              • sparse_values
              • sparse_shape

              Deserialize and concatenate SparseTensors from a serialized minibatch.

              The input serialized_sparse must be a string matrix of shape `[N x 3]` where + ```

              dequantize' Source #

              Arguments

              :: OneOf '[Int16, Int32, Word16, Word8] t 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor v'2 Float

              min_range: The minimum scalar value possibly produced for the input.

              -> Tensor v'3 Float

              max_range: The maximum scalar value possibly produced for the input.

              -> Tensor Build Float

              output

              deserializeManySparse Source #

              Arguments

              :: TensorType dtype 
              => Tensor v'1 ByteString

              serialized_sparse: 2-D, The N serialized SparseTensor objects. + Must have 3 columns.

              -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)

              (sparse_indices, sparse_values, sparse_shape)

              • sparse_indices
              • sparse_values
              • sparse_shape

              Deserialize and concatenate SparseTensors from a serialized minibatch.

              The input serialized_sparse must be a string matrix of shape `[N x 3]` where N is the minibatch size and the rows correspond to packed outputs of SerializeSparse. The ranks of the original SparseTensor objects must all match. When the final SparseTensor is created, it has rank one @@ -881,28 +942,29 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [1 2] [1 10] values = [1, 2, 3, 4, 5] - shape = [2 50]

              deserializeManySparse'

              Arguments

              :: TensorType dtype 
              => OpParams 
              -> Tensor v'1 ByteString

              serialized_sparse: 2-D, The N serialized SparseTensor objects. - Must have 3 columns.

              -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)

              (sparse_indices, sparse_values, sparse_shape)

              • sparse_indices
              • sparse_values
              • sparse_shape

              destroyTemporaryVariable

              Arguments

              :: (MonadBuild m', TensorType t) 
              => Tensor Ref t

              ref: A reference to the temporary variable tensor.

              -> m' (Tensor Value t)

              value

              Destroys the temporary variable and returns its final value.

              Sets output to the value of the Tensor pointed to by ref, then destroys + shape = [2 50]

              deserializeManySparse' Source #

              Arguments

              :: TensorType dtype 
              => OpParams 
              -> Tensor v'1 ByteString

              serialized_sparse: 2-D, The N serialized SparseTensor objects. + Must have 3 columns.

              -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)

              (sparse_indices, sparse_values, sparse_shape)

              • sparse_indices
              • sparse_values
              • sparse_shape

              destroyResourceOp Source #

              Arguments

              :: MonadBuild m' 
              => Tensor v'1 ResourceHandle

              resource: handle to the resource to delete.

              -> m' ControlNode 

              Deletes the resource specified by the handle.

              All subsequent operations using the resource will result in a NotFound + error status.

              destroyResourceOp' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Tensor v'1 ResourceHandle

              resource: handle to the resource to delete.

              -> m' ControlNode 

              destroyTemporaryVariable Source #

              Arguments

              :: (MonadBuild m', TensorType t) 
              => Tensor Ref t

              ref: A reference to the temporary variable tensor.

              -> m' (Tensor Value t)

              value

              Destroys the temporary variable and returns its final value.

              Sets output to the value of the Tensor pointed to by ref, then destroys the temporary variable called var_name. All other uses of ref *must* have executed before this op. This is typically achieved by chaining the ref through each assign op, or by - using control dependencies.

              Outputs the final value of the tensor pointed to by ref.

              destroyTemporaryVariable'

              Arguments

              :: (MonadBuild m', TensorType t) 
              => OpParams 
              -> Tensor Ref t

              ref: A reference to the temporary variable tensor.

              -> m' (Tensor Value t)

              value

              diag

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t 
              => Tensor v'1 t

              diagonal: Rank k tensor where k is at most 3.

              -> Tensor Build t

              output

              Returns a diagonal tensor with a given diagonal values.

              Given a diagonal, this operation returns a tensor with the diagonal and + using control dependencies.

              Outputs the final value of the tensor pointed to by ref.

              destroyTemporaryVariable' Source #

              Arguments

              :: (MonadBuild m', TensorType t) 
              => OpParams 
              -> Tensor Ref t

              ref: A reference to the temporary variable tensor.

              -> m' (Tensor Value t)

              value

              diag Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t 
              => Tensor v'1 t

              diagonal: Rank k tensor where k is at most 3.

              -> Tensor Build t

              output

              Returns a diagonal tensor with a given diagonal values.

              Given a diagonal, this operation returns a tensor with the diagonal and everything else padded with zeros. The diagonal is computed as follows:

              Assume diagonal has dimensions [D1,..., Dk], then the output is a tensor of - rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:

              `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

              For example:

              ```prettyprint + rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:

              `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

              For example:

              ``` # diagonal is [1, 2, 3, 4] tf.diag(diagonal) ==> [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]] - ```

              diag'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              diagonal: Rank k tensor where k is at most 3.

              -> Tensor Build t

              output

              diagPart

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t 
              => Tensor v'1 t

              input: Rank k tensor where k is 2, 4, or 6.

              -> Tensor Build t

              diagonal: The extracted diagonal.

              Returns the diagonal part of the tensor.

              This operation returns a tensor with the diagonal part + ```

              diag' Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              diagonal: Rank k tensor where k is at most 3.

              -> Tensor Build t

              output

              diagPart Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t 
              => Tensor v'1 t

              input: Rank k tensor where k is 2, 4, or 6.

              -> Tensor Build t

              diagonal: The extracted diagonal.

              Returns the diagonal part of the tensor.

              This operation returns a tensor with the diagonal part of the input. The diagonal part is computed as follows:

              Assume input has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a - tensor of rank k with dimensions `[D1,..., Dk]` where:

              `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

              For example:

              ```prettyprint + tensor of rank k with dimensions `[D1,..., Dk]` where:

              `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

              For example:

              ``` # input is [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]]

              tf.diag_part(input) ==> [1, 2, 3, 4] - ```

              diagPart'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: Rank k tensor where k is 2, 4, or 6.

              -> Tensor Build t

              diagonal: The extracted diagonal.

              digamma

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes Psi, the derivative of Lgamma (the log of the absolute value of

              `Gamma(x)`), element-wise.

              digamma'

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              dilation2D

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor Build t

              output: 4-D with shape `[batch, out_height, out_width, depth]`.

              Computes the grayscale dilation of 4-D input and 3-D filter tensors.

              The input tensor has shape `[batch, in_height, in_width, depth]` and the - filter tensor has shape `[filter_height, filter_width, depth]`, i.e., each + ```

              diagPart' Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: Rank k tensor where k is 2, 4, or 6.

              -> Tensor Build t

              diagonal: The extracted diagonal.

              digamma Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes Psi, the derivative of Lgamma (the log of the absolute value of

              `Gamma(x)`), element-wise.

              digamma' Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              dilation2D Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor Build t

              output: 4-D with shape `[batch, out_height, out_width, depth]`.

              Computes the grayscale dilation of 4-D input and 3-D filter tensors.

              The input tensor has shape `[batch, in_height, in_width, depth]` and the + filter tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The output tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output @@ -914,19 +976,19 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c]

              Max-pooling is a special case when the filter has size equal to the pooling - kernel size and contains all zeros.

              Note on duality: The dilation of input by the filter is equal to the - negation of the erosion of `-input` by the reflected filter.

              dilation2D'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor Build t

              output: 4-D with shape `[batch, out_height, out_width, depth]`.

              dilation2DBackpropFilter

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

              -> Tensor Build t

              filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`.

              Computes the gradient of morphological 2-D dilation with respect to the filter.

              dilation2DBackpropFilter'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

              -> Tensor Build t

              filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`.

              dilation2DBackpropInput

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

              -> Tensor Build t

              in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.

              Computes the gradient of morphological 2-D dilation with respect to the input.

              dilation2DBackpropInput'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

              -> Tensor Build t

              in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.

              div

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              x

              -> Tensor v'2 t

              y

              -> Tensor Build t

              z

              Returns x / y element-wise.

              • NOTE*: Div supports broadcasting. More about broadcasting - here

              div'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor v'2 t

              y

              -> Tensor Build t

              z

              drawBoundingBoxes

              Arguments

              :: OneOf `[Word16, Float]` t 
              => Tensor v'1 t

              images: 4-D with shape `[batch, height, width, depth]`. A batch of images.

              -> Tensor v'2 Float

              boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding - boxes.

              -> Tensor Build t

              output: 4-D with the same shape as images. The batch of input images with + kernel size and contains all zeros.

              Note on duality: The dilation of input by the filter is equal to the + negation of the erosion of `-input` by the reflected filter.

              dilation2D' Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor Build t

              output: 4-D with shape `[batch, out_height, out_width, depth]`.

              dilation2DBackpropFilter Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

              -> Tensor Build t

              filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`.

              Computes the gradient of morphological 2-D dilation with respect to the filter.

              dilation2DBackpropFilter' Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

              -> Tensor Build t

              filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`.

              dilation2DBackpropInput Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

              -> Tensor Build t

              in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.

              Computes the gradient of morphological 2-D dilation with respect to the input.

              dilation2DBackpropInput' Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              input: 4-D with shape `[batch, in_height, in_width, depth]`.

              -> Tensor v'2 t

              filter: 3-D with shape `[filter_height, filter_width, depth]`.

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

              -> Tensor Build t

              in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.

              div Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => Tensor v'1 t

              x

              -> Tensor v'2 t

              y

              -> Tensor Build t

              z

              Returns x / y element-wise.

              • NOTE*: Div supports broadcasting. More about broadcasting + here

              drawBoundingBoxes Source #

              Arguments

              :: OneOf '[Word16, Float] t 
              => Tensor v'1 t

              images: 4-D with shape `[batch, height, width, depth]`. A batch of images.

              -> Tensor v'2 Float

              boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding + boxes.

              -> Tensor Build t

              output: 4-D with the same shape as images. The batch of input images with bounding boxes drawn on the images.

              Draw bounding boxes on a batch of images.

              Outputs a copy of images but draws on top of the pixels zero or more bounding boxes specified by the locations in boxes. The coordinates of the each bounding box in boxes are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.

              For example, if an image is 100 x 200 pixels and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the - bounding box will be `(10, 40)` to `(50, 180)`.

              Parts of the bounding box may fall outside the image.

              drawBoundingBoxes'

              Arguments

              :: OneOf `[Word16, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              images: 4-D with shape `[batch, height, width, depth]`. A batch of images.

              -> Tensor v'2 Float

              boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding - boxes.

              -> Tensor Build t

              output: 4-D with the same shape as images. The batch of input images with - bounding boxes drawn on the images.

              dynamicPartition

              Arguments

              :: TensorType t 
              => Int64

              num_partitions: The number of partitions to output.

              -> Tensor v'1 t

              data

              -> Tensor v'2 Int32

              partitions: Any shape. Indices in the range `[0, num_partitions)`.

              -> [Tensor Build t]

              outputs

              Partitions `data` into num_partitions tensors using indices from partitions.

              For each index tuple js of size `partitions.ndim`, the slice `data[js, ...]` + bounding box will be `(10, 40)` to `(50, 180)`.

              Parts of the bounding box may fall outside the image.

              drawBoundingBoxes' Source #

              Arguments

              :: OneOf '[Word16, Float] t 
              => OpParams 
              -> Tensor v'1 t

              images: 4-D with shape `[batch, height, width, depth]`. A batch of images.

              -> Tensor v'2 Float

              boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding + boxes.

              -> Tensor Build t

              output: 4-D with the same shape as images. The batch of input images with + bounding boxes drawn on the images.

              dynamicPartition Source #

              Arguments

              :: TensorType t 
              => Int64

              num_partitions: The number of partitions to output.

              -> Tensor v'1 t

              data

              -> Tensor v'2 Int32

              partitions: Any shape. Indices in the range `[0, num_partitions)`.

              -> [Tensor Build t]

              outputs

              Partitions `data` into num_partitions tensors using indices from partitions.

              For each index tuple js of size `partitions.ndim`, the slice `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` are placed in `outputs[i]` in lexicographic order of js, and the first dimension of `outputs[i]` is the number of entries in partitions equal to i. @@ -944,9 +1006,9 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core data = [10, 20, 30, 40, 50] outputs[0] = [10, 20, 50] outputs[1] = [30, 40] - ```

              style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/DynamicPartition.png" alt - /div

              dynamicPartition'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Int64

              num_partitions: The number of partitions to output.

              -> Tensor v'1 t

              data

              -> Tensor v'2 Int32

              partitions: Any shape. Indices in the range `[0, num_partitions)`.

              -> [Tensor Build t]

              outputs

              dynamicStitch

              Arguments

              :: TensorType t 
              => [Tensor v'1 Int32]

              indices

              -> [Tensor v'2 t]

              data

              -> Tensor Build t

              merged

              Interleave the values from the `data` tensors into a single tensor.

              Builds a merged tensor such that

              ```python + ```

              See dynamic_stitch for an example on how to merge partitions back.

              style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt + /div

              dynamicPartition' Source #

              Arguments

              :: TensorType t 
              => OpParams 
              -> Int64

              num_partitions: The number of partitions to output.

              -> Tensor v'1 t

              data

              -> Tensor v'2 Int32

              partitions: Any shape. Indices in the range `[0, num_partitions)`.

              -> [Tensor Build t]

              outputs

              dynamicStitch Source #

              Arguments

              :: TensorType t 
              => [Tensor v'1 Int32]

              indices

              -> [Tensor v'2 t]

              data

              -> Tensor Build t

              merged

              Interleave the values from the `data` tensors into a single tensor.

              Builds a merged tensor such that

              ```python merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] ```

              For example, if each `indices[m]` is scalar or vector, we have

              ```python # Scalar indices: @@ -966,14 +1028,28 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], [51, 52], [61, 62]] + ```

              This method can be used to merge partitions created by dynamic_partition + as illustrated on the following example:

              ```python + # Apply function (increments x_i) on elements for which a certain condition + # apply (x_i != -1 in this example). + x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + condition_mask=tf.not_equal(x,tf.constant(-1.)) + partitioned_data = tf.dynamic_partition( + x, tf.cast(condition_mask, tf.int32) , 2) + partitioned_data[1] = partitioned_data[1] + 1.0 + condition_indices = tf.dynamic_partition( + tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + x = tf.dynamic_stitch(condition_indices, partitioned_data) + # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + # unchanged. ```

              style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/DynamicStitch.png" alt - /div

              dynamicStitch'

              Arguments

              :: TensorType t 
              => OpParams 
              -> [Tensor v'1 Int32]

              indices

              -> [Tensor v'2 t]

              data

              -> Tensor Build t

              merged

              editDistance

              Arguments

              :: TensorType t 
              => Tensor v'1 Int64

              hypothesis_indices: The indices of the hypothesis list SparseTensor. - This is an N x R int64 matrix.

              -> Tensor v'2 t

              hypothesis_values: The values of the hypothesis list SparseTensor. - This is an N-length vector.

              -> Tensor v'3 Int64

              hypothesis_shape: The shape of the hypothesis list SparseTensor. - This is an R-length vector.

              -> Tensor v'4 Int64

              truth_indices: The indices of the truth list SparseTensor. - This is an M x R int64 matrix.

              -> Tensor v'5 t

              truth_values: The values of the truth list SparseTensor. - This is an M-length vector.

              -> Tensor v'6 Int64

              truth_shape: truth indices, vector.

              -> Tensor Build Float

              output: A dense float tensor with rank R - 1.

              For the example input:

              // hypothesis represents a 2x1 matrix with variable-length values: + style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt + /div

              dynamicStitch' Source #

              Arguments

              :: TensorType t 
              => OpParams 
              -> [Tensor v'1 Int32]

              indices

              -> [Tensor v'2 t]

              data

              -> Tensor Build t

              merged

              editDistance Source #

              Arguments

              :: TensorType t 
              => Tensor v'1 Int64

              hypothesis_indices: The indices of the hypothesis list SparseTensor. + This is an N x R int64 matrix.

              -> Tensor v'2 t

              hypothesis_values: The values of the hypothesis list SparseTensor. + This is an N-length vector.

              -> Tensor v'3 Int64

              hypothesis_shape: The shape of the hypothesis list SparseTensor. + This is an R-length vector.

              -> Tensor v'4 Int64

              truth_indices: The indices of the truth list SparseTensor. + This is an M x R int64 matrix.

              -> Tensor v'5 t

              truth_values: The values of the truth list SparseTensor. + This is an M-length vector.

              -> Tensor v'6 Int64

              truth_shape: truth indices, vector.

              -> Tensor Build Float

              output: A dense float tensor with rank R - 1.

              For the example input:

              // hypothesis represents a 2x1 matrix with variable-length values: // (0,0) = ["a"] // (1,0) = ["b"] hypothesis_indices = [[0, 0, 0], @@ -995,12 +1071,12 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis

              Computes the (possibly normalized) Levenshtein Edit Distance.

              The inputs are variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape) and - (truth_indices, truth_values, truth_shape).

              The inputs are:

              editDistance'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 Int64

              hypothesis_indices: The indices of the hypothesis list SparseTensor. - This is an N x R int64 matrix.

              -> Tensor v'2 t

              hypothesis_values: The values of the hypothesis list SparseTensor. - This is an N-length vector.

              -> Tensor v'3 Int64

              hypothesis_shape: The shape of the hypothesis list SparseTensor. - This is an R-length vector.

              -> Tensor v'4 Int64

              truth_indices: The indices of the truth list SparseTensor. - This is an M x R int64 matrix.

              -> Tensor v'5 t

              truth_values: The values of the truth list SparseTensor. - This is an M-length vector.

              -> Tensor v'6 Int64

              truth_shape: truth indices, vector.

              -> Tensor Build Float

              output: A dense float tensor with rank R - 1.

              For the example input:

              // hypothesis represents a 2x1 matrix with variable-length values: + (truth_indices, truth_values, truth_shape).

              The inputs are:

              editDistance' Source #

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 Int64

              hypothesis_indices: The indices of the hypothesis list SparseTensor. + This is an N x R int64 matrix.

              -> Tensor v'2 t

              hypothesis_values: The values of the hypothesis list SparseTensor. + This is an N-length vector.

              -> Tensor v'3 Int64

              hypothesis_shape: The shape of the hypothesis list SparseTensor. + This is an R-length vector.

              -> Tensor v'4 Int64

              truth_indices: The indices of the truth list SparseTensor. + This is an M x R int64 matrix.

              -> Tensor v'5 t

              truth_values: The values of the truth list SparseTensor. + This is an M-length vector.

              -> Tensor v'6 Int64

              truth_shape: truth indices, vector.

              -> Tensor Build Float

              output: A dense float tensor with rank R - 1.

              For the example input:

              // hypothesis represents a 2x1 matrix with variable-length values: // (0,0) = ["a"] // (1,0) = ["b"] hypothesis_indices = [[0, 0, 0], @@ -1019,32 +1095,36 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core truth_shape = [2, 2, 2] normalize = true

              The output will be:

              // output is a 2x2 matrix with edit distances normalized by truth lengths. output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis - [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis

              elu

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              features

              -> Tensor Build t

              activations

              Computes exponential linear: `exp(features) - 1` if < 0, features otherwise.

              See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)

              elu'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              features

              -> Tensor Build t

              activations

              eluGrad

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              gradients: The backpropagated gradients to the corresponding Elu operation.

              -> Tensor v'2 t

              outputs: The outputs of the corresponding Elu operation.

              -> Tensor Build t

              backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, - gradients otherwise.

              Computes gradients for the exponential linear (Elu) operation.

              eluGrad'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              gradients: The backpropagated gradients to the corresponding Elu operation.

              -> Tensor v'2 t

              outputs: The outputs of the corresponding Elu operation.

              -> Tensor Build t

              backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, - gradients otherwise.

              encodeBase64

              Arguments

              :: Tensor v'1 ByteString

              input: Strings to be encoded.

              -> Tensor Build ByteString

              output: Input strings encoded in base64.

              Encode strings into web-safe base64 format.

              Refer to the following article for more information on base64 format: + [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis

              elu Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => Tensor v'1 t

              features

              -> Tensor Build t

              activations

              Computes exponential linear: `exp(features) - 1` if < 0, features otherwise.

              See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)

              elu' Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              features

              -> Tensor Build t

              activations

              eluGrad Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => Tensor v'1 t

              gradients: The backpropagated gradients to the corresponding Elu operation.

              -> Tensor v'2 t

              outputs: The outputs of the corresponding Elu operation.

              -> Tensor Build t

              backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, + gradients otherwise.

              Computes gradients for the exponential linear (Elu) operation.

              eluGrad' Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              gradients: The backpropagated gradients to the corresponding Elu operation.

              -> Tensor v'2 t

              outputs: The outputs of the corresponding Elu operation.

              -> Tensor Build t

              backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, + gradients otherwise.

              encodeBase64 Source #

              Arguments

              :: Tensor v'1 ByteString

              input: Strings to be encoded.

              -> Tensor Build ByteString

              output: Input strings encoded in base64.

              Encode strings into web-safe base64 format.

              Refer to the following article for more information on base64 format: en.wikipedia.orgwikiBase64. Base64 strings may have padding with '=' at the end so that the encoded has length multiple of 4. See Padding section of the - link above.

              Web-safe means that the encoder uses - and _ instead of + and /.

              encodeBase64'

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              input: Strings to be encoded.

              -> Tensor Build ByteString

              output: Input strings encoded in base64.

              encodeJpeg

              Arguments

              :: Tensor v'1 Word8

              image: 3-D with shape `[height, width, channels]`.

              -> Tensor Build ByteString

              contents: 0-D. JPEG-encoded image.

              JPEG-encode an image.

              image is a 3-D uint8 Tensor of shape `[height, width, channels]`.

              The attr format can be used to override the color format of the encoded + link above.

              Web-safe means that the encoder uses - and _ instead of + and /.

              encodeBase64' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 ByteString

              input: Strings to be encoded.

              -> Tensor Build ByteString

              output: Input strings encoded in base64.

              encodeJpeg Source #

              Arguments

              :: Tensor v'1 Word8

              image: 3-D with shape `[height, width, channels]`.

              -> Tensor Build ByteString

              contents: 0-D. JPEG-encoded image.

              JPEG-encode an image.

              image is a 3-D uint8 Tensor of shape `[height, width, channels]`.

              The attr format can be used to override the color format of the encoded output. Values can be:

              • `''`: Use a default format based on the number of channels in the image.
              • grayscale: Output a grayscale JPEG image. The channels dimension of image must be 1.
              • rgb: Output an RGB JPEG image. The channels dimension of image must be 3.

              If format is not specified or is the empty string, a default format is picked - in function of the number of channels in image:

              • 1: Output a grayscale image.
              • 3: Output an RGB image.

              encodeJpeg'

              Arguments

              :: OpParams 
              -> Tensor v'1 Word8

              image: 3-D with shape `[height, width, channels]`.

              -> Tensor Build ByteString

              contents: 0-D. JPEG-encoded image.

              encodePng

              Arguments

              :: OneOf `[Word16, Word8]` t 
              => Tensor v'1 t

              image: 3-D with shape `[height, width, channels]`.

              -> Tensor Build ByteString

              contents: 0-D. PNG-encoded image.

              PNG-encode an image.

              image is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` + in function of the number of channels in image:

              • 1: Output a grayscale image.
              • 3: Output an RGB image.

              encodeJpeg' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 Word8

              image: 3-D with shape `[height, width, channels]`.

              -> Tensor Build ByteString

              contents: 0-D. JPEG-encoded image.

              encodePng Source #

              Arguments

              :: OneOf '[Word16, Word8] t 
              => Tensor v'1 t

              image: 3-D with shape `[height, width, channels]`.

              -> Tensor Build ByteString

              contents: 0-D. PNG-encoded image.

              PNG-encode an image.

              image is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` where channels is:

              • 1: for grayscale.
              • 2: for grayscale + alpha.
              • 3: for RGB.
              • 4: for RGBA.

              The ZLIB compression level, compression, can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating - the smallest output, but is slower.

              encodePng'

              Arguments

              :: OneOf `[Word16, Word8]` t 
              => OpParams 
              -> Tensor v'1 t

              image: 3-D with shape `[height, width, channels]`.

              -> Tensor Build ByteString

              contents: 0-D. PNG-encoded image.

              enter

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              data: The tensor to be made available to the child frame.

              -> Tensor Build t

              output: The same tensor as `data`.

              Creates or finds a child frame, and makes `data` available to the child frame.

              This op is used together with Exit to create loops in the graph. + the smallest output, but is slower.

              encodePng' Source #

              Arguments

              :: OneOf '[Word16, Word8] t 
              => OpParams 
              -> Tensor v'1 t

              image: 3-D with shape `[height, width, channels]`.

              -> Tensor Build ByteString

              contents: 0-D. PNG-encoded image.

              encodeWav Source #

              Arguments

              :: Tensor v'1 Float

              audio: 2-D with shape `[length, channels]`.

              -> Tensor v'2 Int32

              sample_rate: Scalar containing the sample frequency.

              -> Tensor Build ByteString

              contents: 0-D. WAV-encoded file contents.

              Encode audio data using the WAV file format.

              This operation will generate a string suitable to be saved out to create a .wav + audio file. It will be encoded in the 16-bit PCM format. It takes in float + values in the range -1.0f to 1.0f, and any outside that value will be clamped to + that range.

              audio is a 2-D float Tensor of shape `[length, channels]`. + sample_rate is a scalar Tensor holding the rate to use (e.g. 44100).

              encodeWav' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              audio: 2-D with shape `[length, channels]`.

              -> Tensor v'2 Int32

              sample_rate: Scalar containing the sample frequency.

              -> Tensor Build ByteString

              contents: 0-D. WAV-encoded file contents.

              enter Source #

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              data: The tensor to be made available to the child frame.

              -> Tensor Build t

              output: The same tensor as `data`.

              Creates or finds a child frame, and makes `data` available to the child frame.

              This op is used together with Exit to create loops in the graph. The unique frame_name is used by the Executor to identify frames. If is_constant is true, output is a constant in the child frame; otherwise it may be changed in the child frame. At most parallel_iterations iterations - are run in parallel in the child frame.

              enter'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              data: The tensor to be made available to the child frame.

              -> Tensor Build t

              output: The same tensor as `data`.

              equal

              Returns the truth value of (x == y) element-wise.

              • NOTE*: Equal supports broadcasting. More about broadcasting - here

              erf

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes the Gauss error function of x element-wise.

              erf'

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              erfc

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes the complementary error function of x element-wise.

              erfc'

              Arguments

              :: OneOf `[Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              exit

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              data: The tensor to be made available to the parent frame.

              -> Tensor Build t

              output: The same tensor as `data`.

              Exits the current frame to its parent frame.

              Exit makes its input `data` available to the parent frame.

              exit'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              data: The tensor to be made available to the parent frame.

              -> Tensor Build t

              output: The same tensor as `data`.

              exp

              Arguments

              :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes exponential of x element-wise. \(y = e^x\).

              exp'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              expandDims

              Arguments

              :: (TensorType t, OneOf `[Int32, Int64]` tdim) 
              => Tensor v'1 t

              input

              -> Tensor v'2 tdim

              dim: 0-D (scalar). Specifies the dimension index at which to - expand the shape of input.

              -> Tensor Build t

              output: Contains the same data as input, but its shape has an additional + are run in parallel in the child frame.

              enter' Source #

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              data: The tensor to be made available to the child frame.

              -> Tensor Build t

              output: The same tensor as `data`.

              equal Source #

              Returns the truth value of (x == y) element-wise.

              • NOTE*: Equal supports broadcasting. More about broadcasting + here

              erf Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes the Gauss error function of x element-wise.

              erf' Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              erfc Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes the complementary error function of x element-wise.

              erfc' Source #

              Arguments

              :: OneOf '[Word16, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              exit Source #

              Arguments

              :: TensorType t 
              => Tensor v'1 t

              data: The tensor to be made available to the parent frame.

              -> Tensor Build t

              output: The same tensor as `data`.

              Exits the current frame to its parent frame.

              Exit makes its input `data` available to the parent frame.

              exit' Source #

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 t

              data: The tensor to be made available to the parent frame.

              -> Tensor Build t

              output: The same tensor as `data`.

              exp Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes exponential of x element-wise. \(y = e^x\).

              expandDims Source #

              Arguments

              :: (TensorType t, OneOf '[Int32, Int64] tdim) 
              => Tensor v'1 t

              input

              -> Tensor v'2 tdim

              dim: 0-D (scalar). Specifies the dimension index at which to + expand the shape of input.

              -> Tensor Build t

              output: Contains the same data as input, but its shape has an additional dimension of size 1 added.

              Inserts a dimension of 1 into a tensor's shape.

              Given a tensor input, this operation inserts a dimension of 1 at the dimension index dim of input's shape. The dimension index dim starts at zero; if you specify a negative number for dim it is counted backward from the end.

              This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, - which will make the shape `[1, height, width, channels]`.

              Other examples:

              ```prettyprint + which will make the shape `[1, height, width, channels]`.

              Other examples:

              ``` # t is a tensor of shape [2] shape(expand_dims(t, 0)) ==> [1, 2] shape(expand_dims(t, 1)) ==> [2, 1] @@ -1053,12 +1133,12 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] ```

              This operation requires that:

              `-1-input.dims() <= dim <= input.dims()`

              This operation is related to `squeeze()`, which removes dimensions of - size 1.

              expandDims'

              Arguments

              :: (TensorType t, OneOf `[Int32, Int64]` tdim) 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor v'2 tdim

              dim: 0-D (scalar). Specifies the dimension index at which to - expand the shape of input.

              -> Tensor Build t

              output: Contains the same data as input, but its shape has an additional - dimension of size 1 added.

              expm1

              Arguments

              :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes exponential of x - 1 element-wise.

              I.e., \(y = (exp x) - 1\).

              expm1'

              Arguments

              :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor Build t

              y

              extractGlimpse

              Arguments

              :: Tensor v'1 Float

              input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.

              -> Tensor v'2 Int32

              size: A 1-D tensor of 2 elements containing the size of the glimpses + size 1.

              expandDims' Source #

              Arguments

              :: (TensorType t, OneOf '[Int32, Int64] tdim) 
              => OpParams 
              -> Tensor v'1 t

              input

              -> Tensor v'2 tdim

              dim: 0-D (scalar). Specifies the dimension index at which to + expand the shape of input.

              -> Tensor Build t

              output: Contains the same data as input, but its shape has an additional + dimension of size 1 added.

              expm1 Source #

              Arguments

              :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
              => Tensor v'1 t

              x

              -> Tensor Build t

              y

              Computes exponential of x - 1 element-wise.

              I.e., \(y = (exp x) - 1\).

              extractGlimpse Source #

              Arguments

              :: Tensor v'1 Float

              input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.

              -> Tensor v'2 Int32

              size: A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following - by the glimpse width.

              -> Tensor v'3 Float

              offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing - the x, y locations of the center of each window.

              -> Tensor Build Float

              glimpse: A tensor representing the glimpses `[batch_size, + by the glimpse width.

              -> Tensor v'3 Float

              offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing + the y, x locations of the center of each window.

              -> Tensor Build Float

              glimpse: A tensor representing the glimpses `[batch_size, glimpse_height, glimpse_width, channels]`.

              Extracts a glimpse from the input tensor.

              Returns a set of windows called glimpses extracted at location offsets from the input tensor. If the windows only partially overlaps the inputs, the non overlapping areas will be filled with @@ -1070,72 +1150,94 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core dimension.

            • If the coordinates are both normalized and centered, they range from
            • 1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0).
            • If the coordinates are not normalized they are interpreted as - numbers of pixels.
            • extractGlimpse'

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.

              -> Tensor v'2 Int32

              size: A 1-D tensor of 2 elements containing the size of the glimpses + numbers of pixels.

              extractGlimpse' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.

              -> Tensor v'2 Int32

              size: A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following - by the glimpse width.

              -> Tensor v'3 Float

              offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing - the x, y locations of the center of each window.

              -> Tensor Build Float

              glimpse: A tensor representing the glimpses `[batch_size, - glimpse_height, glimpse_width, channels]`.

              extractImagePatches

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.

              -> Tensor Build t

              patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * + by the glimpse width.

              -> Tensor v'3 Float

              offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing + the y, x locations of the center of each window.

              -> Tensor Build Float

              glimpse: A tensor representing the glimpses `[batch_size, + glimpse_height, glimpse_width, channels]`.

              extractImagePatches Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => Tensor v'1 t

              images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.

              -> Tensor Build t

              patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image patches with size - `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.

              Extract patches from images and put them in the "depth" output dimension.

              extractImagePatches'

              Arguments

              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.

              -> Tensor Build t

              patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * + `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note + out_rows and out_cols are the dimensions of the output patches.

              Extract patches from images and put them in the "depth" output dimension.

              extractImagePatches' Source #

              Arguments

              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.

              -> Tensor Build t

              patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image patches with size - `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.

              fFT

              Arguments

              :: Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most - dimension of input is replaced with its 1D Fourier Transform.

              Compute the 1-dimensional discrete Fourier Transform over the inner-most

              dimension of input.

              fFT'

              Arguments

              :: OpParams 
              -> Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most - dimension of input is replaced with its 1D Fourier Transform.

              fFT2D

              Arguments

              :: Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most 2 - dimensions of input are replaced with their 2D Fourier Transform.

              compatibility(numpy) - Equivalent to np.fft2 - end_compatibility

              Compute the 2-dimensional discrete Fourier Transform over the inner-most

              2 dimensions of input.

              fFT2D'

              Arguments

              :: OpParams 
              -> Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most 2 - dimensions of input are replaced with their 2D Fourier Transform.

              compatibility(numpy) - Equivalent to np.fft2 - end_compatibility

              fFT3D

              Arguments

              :: Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most 3 - dimensions of input are replaced with their 3D Fourier Transform.

              compatibility(numpy) - Equivalent to np.fft3 - end_compatibility

              Compute the 3-dimensional discrete Fourier Transform over the inner-most 3

              dimensions of input.

              fFT3D'

              Arguments

              :: OpParams 
              -> Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most 3 - dimensions of input are replaced with their 3D Fourier Transform.

              compatibility(numpy) - Equivalent to np.fft3 - end_compatibility

              fIFOQueue

              Arguments

              :: MonadBuild m' 
              => [DataType]

              component_types: The type of each component in a value.

              -> m' (Tensor Ref ByteString)

              handle: The handle to the queue.

              A queue that produces elements in first-in first-out order.

              fIFOQueue'

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> [DataType]

              component_types: The type of each component in a value.

              -> m' (Tensor Ref ByteString)

              handle: The handle to the queue.

              fIFOQueueV2

              Arguments

              :: MonadBuild m' 
              => [DataType]

              component_types: The type of each component in a value.

              -> m' ResourceHandle

              handle: The handle to the queue.

              A queue that produces elements in first-in first-out order.

              fIFOQueueV2'

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> [DataType]

              component_types: The type of each component in a value.

              -> m' ResourceHandle

              handle: The handle to the queue.

              fact

              Arguments

              :: Tensor Build ByteString

              fact

              Output a fact about factorials.

              fact'

              Arguments

              :: OpParams 
              -> Tensor Build ByteString

              fact

              fakeQuantWithMinMaxArgs

              Arguments

              :: Tensor v'1 Float

              inputs

              -> Tensor Build Float

              outputs

              Fake-quantize the inputs tensor, type float to outputs tensor of same type.

              Attributes [min; max] define the clamping range for the inputs data. Op - divides this range into 255 steps (total of 256 values), then replaces each - inputs value with the closest of the quantized step values.

              Quantization is called fake since the output is still in floating point.

              fakeQuantWithMinMaxArgs'

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              inputs

              -> Tensor Build Float

              outputs

              fakeQuantWithMinMaxArgsGradient

              Arguments

              :: Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.

              -> Tensor Build Float

              backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: - `gradients * (inputs >= min && inputs <= max)`.

              Compute gradients for a FakeQuantWithMinMaxArgs operation.

              fakeQuantWithMinMaxArgsGradient'

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.

              -> Tensor Build Float

              backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: - `gradients * (inputs >= min && inputs <= max)`.

              fakeQuantWithMinMaxVars

              Arguments

              :: Tensor v'1 Float

              inputs

              -> Tensor v'2 Float

              min

              -> Tensor v'3 Float

              max

              -> Tensor Build Float

              outputs

              Fake-quantize the inputs tensor of type float and shape `[b, h, w, d]` via

              global float scalars min and max to outputs tensor of same shape as - inputs.

              min; max
              is the clamping range for the inputs data. Op divides this range - into 255 steps (total of 256 values), then replaces each inputs value with the - closest of the quantized step values.

              This operation has a gradient and thus allows for training min and max values.

              fakeQuantWithMinMaxVars'

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              inputs

              -> Tensor v'2 Float

              min

              -> Tensor v'3 Float

              max

              -> Tensor Build Float

              outputs

              fakeQuantWithMinMaxVarsGradient

              Arguments

              :: Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. - min, max: Quantization interval, scalar floats.

              -> Tensor v'3 Float

              min

              -> Tensor v'4 Float

              max

              -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

              (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

              • backprops_wrt_input: Backpropagated gradients w.r.t. inputs: + `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note + out_rows and out_cols are the dimensions of the output patches.

              fFT Source #

              Arguments

              :: Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most + dimension of input is replaced with its 1D Fourier transform.

              compatibility(numpy) + Equivalent to np.fft.fft + end_compatibility

              Fast Fourier transform.

              Computes the 1-dimensional discrete Fourier transform over the inner-most + dimension of input.

              fFT' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most + dimension of input is replaced with its 1D Fourier transform.

              compatibility(numpy) + Equivalent to np.fft.fft + end_compatibility

              fFT2D Source #

              Arguments

              :: Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most 2 + dimensions of input are replaced with their 2D Fourier transform.

              compatibility(numpy) + Equivalent to np.fft.fft2 + end_compatibility

              2D fast Fourier transform.

              Computes the 2-dimensional discrete Fourier transform over the inner-most + 2 dimensions of input.

              fFT2D' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most 2 + dimensions of input are replaced with their 2D Fourier transform.

              compatibility(numpy) + Equivalent to np.fft.fft2 + end_compatibility

              fFT3D Source #

              Arguments

              :: Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most 3 + dimensions of input are replaced with their 3D Fourier transform.

              compatibility(numpy) + Equivalent to np.fft.fftn with 3 dimensions. + end_compatibility

              3D fast Fourier transform.

              Computes the 3-dimensional discrete Fourier transform over the inner-most 3 + dimensions of input.

              fFT3D' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 (Complex Float)

              input: A complex64 tensor.

              -> Tensor Build (Complex Float)

              output: A complex64 tensor of the same shape as input. The inner-most 3 + dimensions of input are replaced with their 3D Fourier transform.

              compatibility(numpy) + Equivalent to np.fft.fftn with 3 dimensions. + end_compatibility

              fIFOQueue Source #

              Arguments

              :: MonadBuild m' 
              => [DataType]

              component_types: The type of each component in a value.

              -> m' (Tensor Ref ByteString)

              handle: The handle to the queue.

              A queue that produces elements in first-in first-out order.

              fIFOQueue' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> [DataType]

              component_types: The type of each component in a value.

              -> m' (Tensor Ref ByteString)

              handle: The handle to the queue.

              fIFOQueueV2 Source #

              Arguments

              :: MonadBuild m' 
              => [DataType]

              component_types: The type of each component in a value.

              -> m' (Tensor Value ResourceHandle)

              handle: The handle to the queue.

              A queue that produces elements in first-in first-out order.

              fIFOQueueV2' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> [DataType]

              component_types: The type of each component in a value.

              -> m' (Tensor Value ResourceHandle)

              handle: The handle to the queue.

              fact Source #

              Arguments

              :: Tensor Build ByteString

              fact

              Output a fact about factorials.

              fakeQuantWithMinMaxArgs Source #

              Arguments

              :: Tensor v'1 Float

              inputs

              -> Tensor Build Float

              outputs

              Fake-quantize the inputs tensor, type float to outputs tensor of same type.

              Attributes `[min; max]` define the clamping range for the inputs data. + inputs values are quantized into the quantization range (`[0; 2^num_bits - 1]` + when narrow_range is false and `[1; 2^num_bits - 1]` when it is true) and + then de-quantized and output as floats in `[min; max]` interval. + num_bits is the bitwidth of the quantization; between 2 and 8, inclusive.

              Quantization is called fake since the output is still in floating point.

              fakeQuantWithMinMaxArgsGradient Source #

              Arguments

              :: Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.

              -> Tensor Build Float

              backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: + `gradients * (inputs >= min && inputs <= max)`.

              Compute gradients for a FakeQuantWithMinMaxArgs operation.

              fakeQuantWithMinMaxArgsGradient' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.

              -> Tensor Build Float

              backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: + `gradients * (inputs >= min && inputs <= max)`.

              fakeQuantWithMinMaxVars Source #

              Arguments

              :: Tensor v'1 Float

              inputs

              -> Tensor v'2 Float

              min

              -> Tensor v'3 Float

              max

              -> Tensor Build Float

              outputs

              Fake-quantize the inputs tensor of type float via global float scalars min

              and max to outputs tensor of same shape as inputs.

              `[min; max]` define the clamping range for the inputs data. + inputs values are quantized into the quantization range (`[0; 2^num_bits - 1]` + when narrow_range is false and `[1; 2^num_bits - 1]` when it is true) and + then de-quantized and output as floats in `[min; max]` interval. + num_bits is the bitwidth of the quantization; between 2 and 8, inclusive.

              This operation has a gradient and thus allows for training min and max + values.

              fakeQuantWithMinMaxVars' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              inputs

              -> Tensor v'2 Float

              min

              -> Tensor v'3 Float

              max

              -> Tensor Build Float

              outputs

              fakeQuantWithMinMaxVarsGradient Source #

              Arguments

              :: Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. + min, max: Quantization interval, scalar floats.

              -> Tensor v'3 Float

              min

              -> Tensor v'4 Float

              max

              -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

              (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

              • backprops_wrt_input: Backpropagated gradients w.r.t. inputs: `gradients * (inputs >= min && inputs <= max)`.
              • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter: `sum(gradients * (inputs < min))`.
              • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter: - `sum(gradients * (inputs > max))`.

              Compute gradients for a FakeQuantWithMinMaxVars operation.

              fakeQuantWithMinMaxVarsGradient'

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. - min, max: Quantization interval, scalar floats.

              -> Tensor v'3 Float

              min

              -> Tensor v'4 Float

              max

              -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

              (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

              • backprops_wrt_input: Backpropagated gradients w.r.t. inputs: + `sum(gradients * (inputs > max))`.

              Compute gradients for a FakeQuantWithMinMaxVars operation.

              fakeQuantWithMinMaxVarsGradient' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. + min, max: Quantization interval, scalar floats.

              -> Tensor v'3 Float

              min

              -> Tensor v'4 Float

              max

              -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

              (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

              • backprops_wrt_input: Backpropagated gradients w.r.t. inputs: `gradients * (inputs >= min && inputs <= max)`.
              • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter: `sum(gradients * (inputs < min))`.
              • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter: - `sum(gradients * (inputs > max))`.

              fakeQuantWithMinMaxVarsPerChannel

              Arguments

              :: Tensor v'1 Float

              inputs

              -> Tensor v'2 Float

              min

              -> Tensor v'3 Float

              max

              -> Tensor Build Float

              outputs

              Fake-quantize the inputs tensor of type float and one of the shapes: `[d]`,

              `[b, d]` `[b, h, w, d]` via per-channel floats min and max of shape `[d]` - to outputs tensor of same shape as inputs.

              min; max
              is the clamping range for the inputs data in the corresponding - depth channel. Op divides this range into 255 steps (total of 256 values), then - replaces each inputs value with the closest of the quantized step values.

              This operation has a gradient and thus allows for training min and max values.

              fakeQuantWithMinMaxVarsPerChannel'

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              inputs

              -> Tensor v'2 Float

              min

              -> Tensor v'3 Float

              max

              -> Tensor Build Float

              outputs

              fakeQuantWithMinMaxVarsPerChannelGradient

              Arguments

              :: Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, - shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape + `sum(gradients * (inputs > max))`.

              fakeQuantWithMinMaxVarsPerChannel Source #

              Arguments

              :: Tensor v'1 Float

              inputs

              -> Tensor v'2 Float

              min

              -> Tensor v'3 Float

              max

              -> Tensor Build Float

              outputs

              Fake-quantize the inputs tensor of type float and one of the shapes: `[d]`,

              `[b, d]` `[b, h, w, d]` via per-channel floats min and max of shape `[d]` + to outputs tensor of same shape as inputs.

              `[min; max]` define the clamping range for the inputs data. + inputs values are quantized into the quantization range (`[0; 2^num_bits - 1]` + when narrow_range is false and `[1; 2^num_bits - 1]` when it is true) and + then de-quantized and output as floats in `[min; max]` interval. + num_bits is the bitwidth of the quantization; between 2 and 8, inclusive.

              This operation has a gradient and thus allows for training min and max + values.

              fakeQuantWithMinMaxVarsPerChannelGradient Source #

              Arguments

              :: Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, + shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape same as gradients. - min, max: Quantization interval, floats of shape `[d]`.

              -> Tensor v'3 Float

              min

              -> Tensor v'4 Float

              max

              -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

              (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

              • backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as + min, max: Quantization interval, floats of shape `[d]`.

              -> Tensor v'3 Float

              min

              -> Tensor v'4 Float

              max

              -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

              (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

              • backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as inputs: `gradients * (inputs >= min && inputs <= max)`.
              • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`: `sum_per_d(gradients * (inputs < min))`.
              • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`: - `sum_per_d(gradients * (inputs > max))`.

              Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.

              fakeQuantWithMinMaxVarsPerChannelGradient'

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, - shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape + `sum_per_d(gradients * (inputs > max))`.

              Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.

              fakeQuantWithMinMaxVarsPerChannelGradient' Source #

              Arguments

              :: OpParams 
              -> Tensor v'1 Float

              gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, + shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.

              -> Tensor v'2 Float

              inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape same as gradients. - min, max: Quantization interval, floats of shape `[d]`.

              -> Tensor v'3 Float

              min

              -> Tensor v'4 Float

              max

              -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

              (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

              • backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as + min, max: Quantization interval, floats of shape `[d]`.

              -> Tensor v'3 Float

              min

              -> Tensor v'4 Float

              max

              -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

              (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

              • backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as inputs: `gradients * (inputs >= min && inputs <= max)`.
              • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`: `sum_per_d(gradients * (inputs < min))`.
              • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`: - `sum_per_d(gradients * (inputs > max))`.

              fakeQueue

              Arguments

              :: MonadBuild m' 
              => ResourceHandle

              resource

              -> m' (Tensor Ref ByteString)

              handle

              Deprecated. Do not use.

              fakeQueue'

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> ResourceHandle

              resource

              -> m' (Tensor Ref ByteString)

              handle

              fill

              Arguments

              :: TensorType t 
              => Tensor v'1 Int32

              dims: 1-D. Represents the shape of the output tensor.

              -> Tensor v'2 t

              value: 0-D (scalar). Value to fill the returned tensor.

              compatibility(numpy) + `sum_per_d(gradients * (inputs > max))`.

              fakeQueue Source #

              Arguments

              :: MonadBuild m' 
              => Tensor v'1 ResourceHandle

              resource

              -> m' (Tensor Ref ByteString)

              handle

              Deprecated. Do not use.

              fakeQueue' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Tensor v'1 ResourceHandle

              resource

              -> m' (Tensor Ref ByteString)

              handle

              fill Source #

              Arguments

              :: TensorType t 
              => Tensor v'1 Int32

              dims: 1-D. Represents the shape of the output tensor.

              -> Tensor v'2 t

              value: 0-D (scalar). Value to fill the returned tensor.

              compatibility(numpy) Equivalent to np.full - end_compatibility

              -> Tensor Build t

              output

              Creates a tensor filled with a scalar value.

              This operation creates a tensor of shape dims and fills it with value.

              For example:

              ```prettyprint + end_compatibility

              -> Tensor Build t

              output

              Creates a tensor filled with a scalar value.

              This operation creates a tensor of shape dims and fills it with value.

              For example:

              ``` # Output tensor has shape [2, 3]. fill([2, 3], 9) ==> [[9, 9, 9] [9, 9, 9]] - ```

              fill'

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 Int32

              dims: 1-D. Represents the shape of the output tensor.

              -> Tensor v'2 t

              value: 0-D (scalar). Value to fill the returned tensor.

              compatibility(numpy) + ```

              fill' Source #

              Arguments

              :: TensorType t 
              => OpParams 
              -> Tensor v'1 Int32

              dims: 1-D. Represents the shape of the output tensor.

              -> Tensor v'2 t

              value: 0-D (scalar). Value to fill the returned tensor.

              compatibility(numpy) Equivalent to np.full - end_compatibility

              -> Tensor Build t

              output

              fixedLengthRecordReader

              Arguments

              :: MonadBuild m' 
              => Int64

              record_bytes

              -> m' (Tensor Ref ByteString)

              reader_handle: The handle to reference the Reader.

              A Reader that outputs fixed-length records from a file.

              fixedLengthRecordReader'

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Int64

              record_bytes

              -> m' (Tensor Ref ByteString)

              reader_handle: The handle to reference the Reader.

              fixedLengthRecordReaderV2

              Arguments

              :: MonadBuild m' 
              => Int64

              record_bytes

              -> m' ResourceHandle

              reader_handle: The handle to reference the Reader.

              A Reader that outputs fixed-length records from a file.

              fixedLengthRecordReaderV2'

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Int64

              record_bytes

              -> m' ResourceHandle

              reader_handle: The handle to reference the Reader.

              fixedUnigramCandidateSampler

              Arguments

              :: Int64

              num_sampled: Number of candidates to randomly sample per batch.

              -> Int64

              num_true: Number of true labels per context.

              -> Int64

              range_max: The sampler will sample integers from the interval [0, range_max).

              -> Bool

              unique: If unique is true, we sample with rejection, so that all sampled + end_compatibility

              -> Tensor Build t

              output

              fixedLengthRecordDataset Source #

              Arguments

              :: MonadBuild m' 
              => Tensor v'1 ByteString

              filenames: A scalar or a vector containing the name(s) of the file(s) to be + read.

              -> Tensor v'2 Int64

              header_bytes: A scalar representing the number of bytes to skip at the + beginning of a file.

              -> Tensor v'3 Int64

              record_bytes: A scalar representing the number of bytes in each record.

              -> Tensor v'4 Int64

              footer_bytes: A scalar representing the number of bytes to skip at the end + of a file.

              -> m' (Tensor Value ResourceHandle)

              handle

              Creates a dataset that emits the records from one or more binary files.

              fixedLengthRecordDataset' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Tensor v'1 ByteString

              filenames: A scalar or a vector containing the name(s) of the file(s) to be + read.

              -> Tensor v'2 Int64

              header_bytes: A scalar representing the number of bytes to skip at the + beginning of a file.

              -> Tensor v'3 Int64

              record_bytes: A scalar representing the number of bytes in each record.

              -> Tensor v'4 Int64

              footer_bytes: A scalar representing the number of bytes to skip at the end + of a file.

              -> m' (Tensor Value ResourceHandle)

              handle

              fixedLengthRecordReader Source #

              Arguments

              :: MonadBuild m' 
              => Int64

              record_bytes: Number of bytes in the record.

              -> m' (Tensor Ref ByteString)

              reader_handle: The handle to reference the Reader.

              A Reader that outputs fixed-length records from a file.

              fixedLengthRecordReader' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Int64

              record_bytes: Number of bytes in the record.

              -> m' (Tensor Ref ByteString)

              reader_handle: The handle to reference the Reader.

              fixedLengthRecordReaderV2 Source #

              Arguments

              :: MonadBuild m' 
              => Int64

              record_bytes: Number of bytes in the record.

              -> m' (Tensor Value ResourceHandle)

              reader_handle: The handle to reference the Reader.

              A Reader that outputs fixed-length records from a file.

              fixedLengthRecordReaderV2' Source #

              Arguments

              :: MonadBuild m' 
              => OpParams 
              -> Int64

              record_bytes: Number of bytes in the record.

              -> m' (Tensor Value ResourceHandle)

              reader_handle: The handle to reference the Reader.

              fixedUnigramCandidateSampler Source #

              Arguments

              :: MonadBuild m' 
              => Int64

              num_sampled: Number of candidates to randomly sample.

              -> Int64

              num_true: Number of true labels per context.

              -> Int64

              range_max: The sampler will sample integers from the interval [0, range_max).

              -> Bool

              unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

              -> Tensor v'1 Int64

              true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

              -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

              (sampled_candidates, true_expected_count, sampled_expected_count)

              • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

              -> Tensor v'1 Int64

              true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

              -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

              (sampled_candidates, true_expected_count, sampled_expected_count)

              floorMod' Source #

              Arguments

              :: OneOf '[Int32, Int64, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              x

              -> Tensor v'2 t

              y

              -> Tensor Build t

              z

              fractionalAvgPool Source #

              Arguments

              :: OneOf '[Int32, Int64, Double, Float] t 
              => Tensor v'1 t

              value: 4-D with shape `[batch, height, width, channels]`.

              -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

              (output, row_pooling_sequence, col_pooling_sequence)

              • output: output tensor after fractional avg pooling.
              • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
              • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

              Performs fractional average pooling on the input.

              Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each - pooling region.

              fractionalAvgPool'

              Arguments

              :: OneOf `[Int32, Int64, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              value: 4-D with shape `[batch, height, width, channels]`.

              -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

              (output, row_pooling_sequence, col_pooling_sequence)

              • output: output tensor after fractional avg pooling.
              • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
              • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

              fractionalAvgPoolGrad

              Arguments

              :: OneOf `[Int32, Int64, Double, Float]` t 
              => Tensor v'1 Int64

              orig_input_tensor_shape: Original input tensor shape for fractional_avg_pool

              -> Tensor v'2 t

              out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients - w.r.t. the output of fractional_avg_pool.

              -> Tensor v'3 Int64

              row_pooling_sequence: row pooling sequence, form pooling region with - col_pooling_sequence.

              -> Tensor v'4 Int64

              col_pooling_sequence: column pooling sequence, form pooling region with - row_pooling sequence.

              -> Tensor Build t

              output: 4-D. Gradients w.r.t. the input of fractional_avg_pool.

              Computes gradient of the FractionalAvgPool function.

              Unlike FractionalMaxPoolGrad, we don't need to find arg_max for + pooling region.

              fractionalAvgPool' Source #

              Arguments

              :: OneOf '[Int32, Int64, Double, Float] t 
              => OpParams 
              -> Tensor v'1 t

              value: 4-D with shape `[batch, height, width, channels]`.

              -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

              (output, row_pooling_sequence, col_pooling_sequence)

              • output: output tensor after fractional avg pooling.
              • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
              • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

              fractionalAvgPoolGrad Source #

              Arguments

              :: OneOf '[Int32, Int64, Double, Float] t 
              => Tensor v'1 Int64

              orig_input_tensor_shape: Original input tensor shape for fractional_avg_pool

              -> Tensor v'2 t

              out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients + w.r.t. the output of fractional_avg_pool.

              -> Tensor v'3 Int64

              row_pooling_sequence: row pooling sequence, form pooling region with + col_pooling_sequence.

              -> Tensor v'4 Int64

              col_pooling_sequence: column pooling sequence, form pooling region with + row_pooling sequence.

              -> Tensor Build t

              output: 4-D. Gradients w.r.t. the input of fractional_avg_pool.

              Computes gradient of the FractionalAvgPool function.

              Unlike FractionalMaxPoolGrad, we don't need to find arg_max for FractionalAvgPoolGrad, we just need to evenly back-propagate each element of out_backprop to those indices that form the same pooling cell. Therefore, we just need to know the shape of original input tensor, instead of the whole - tensor.

              fractionalAvgPoolGrad'

              Arguments

              :: OneOf `[Int32, Int64, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 Int64

              orig_input_tensor_shape: Original input tensor shape for fractional_avg_pool

              -> Tensor v'2 t

              out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients - w.r.t. the output of fractional_avg_pool.

              -> Tensor v'3 Int64

              row_pooling_sequence: row pooling sequence, form pooling region with - col_pooling_sequence.

              -> Tensor v'4 Int64

              col_pooling_sequence: column pooling sequence, form pooling region with - row_pooling sequence.

              -> Tensor Build t

              output: 4-D. Gradients w.r.t. the input of fractional_avg_pool.

              fractionalMaxPool

              Arguments

              :: OneOf `[Int32, Int64, Double, Float]` t 
              => Tensor v'1 t

              value: 4-D with shape `[batch, height, width, channels]`.

              -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

              (output, row_pooling_sequence, col_pooling_sequence)

              • output: output tensor after fractional max pooling.
              • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
              • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

              Performs fractional max pooling on the input.

              Fractional max pooling is slightly different than regular max pooling. In + tensor.

              fractionalAvgPoolGrad' Source #

              Arguments

              :: OneOf '[Int32, Int64, Double, Float] t 
              => OpParams 
              -> Tensor v'1 Int64

              orig_input_tensor_shape: Original input tensor shape for fractional_avg_pool

              -> Tensor v'2 t

              out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients + w.r.t. the output of fractional_avg_pool.

              -> Tensor v'3 Int64

              row_pooling_sequence: row pooling sequence, form pooling region with + col_pooling_sequence.

              -> Tensor v'4 Int64

              col_pooling_sequence: column pooling sequence, form pooling region with + row_pooling sequence.

              -> Tensor Build t

              output: 4-D. Gradients w.r.t. the input of fractional_avg_pool.

              fractionalMaxPool Source #

              Arguments

              :: OneOf '[Int32, Int64, Double, Float] t 
              => Tensor v'1 t

              value: 4-D with shape `[batch, height, width, channels]`.

              -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

              (output, row_pooling_sequence, col_pooling_sequence)

              • output: output tensor after fractional max pooling.
              • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
              • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

              Performs fractional max pooling on the input.

              Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might @@ -1181,35 +1283,35 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core does not have to be an integer.

              The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries.

              First we define the following:

              1. input_row_length : the number of rows from the input set
              2. output_row_length : which will be smaller than the input
              3. alpha = input_row_length / output_row_length : our reduction ratio
              4. K = floor(alpha)
              5. row_pooling_sequence : this is the result list of pool boundary rows

              Then, row_pooling_sequence should satisfy:

              1. a[0] = 0 : the first value of the sequence is 0
              2. a[end] = input_row_length : the last value of the sequence is the size
              3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
              4. length(row_pooling_sequence) = output_row_length+1

              For more details on fractional max pooling, see this paper: - Benjamin Graham, Fractional Max-Pooling

              fractionalMaxPool'

              Arguments

              :: OneOf `[Int32, Int64, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              value: 4-D with shape `[batch, height, width, channels]`.

              -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

              (output, row_pooling_sequence, col_pooling_sequence)

              • output: output tensor after fractional max pooling.
              • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
              • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

              fractionalMaxPoolGrad

              Arguments

              :: OneOf `[Int32, Int64, Double, Float]` t 
              => Tensor v'1 t

              orig_input: Original input for fractional_max_pool

              -> Tensor v'2 t

              orig_output: Original output for fractional_max_pool

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients - w.r.t. the output of fractional_max_pool.

              -> Tensor v'4 Int64

              row_pooling_sequence: row pooling sequence, form pooling region with - col_pooling_sequence.

              -> Tensor v'5 Int64

              col_pooling_sequence: column pooling sequence, form pooling region with - row_pooling sequence.

              -> Tensor Build t

              output: 4-D. Gradients w.r.t. the input of fractional_max_pool.

              Computes gradient of the FractionalMaxPool function.

              fractionalMaxPoolGrad'

              Arguments

              :: OneOf `[Int32, Int64, Double, Float]` t 
              => OpParams 
              -> Tensor v'1 t

              orig_input: Original input for fractional_max_pool

              -> Tensor v'2 t

              orig_output: Original output for fractional_max_pool

              -> Tensor v'3 t

              out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients - w.r.t. the output of fractional_max_pool.

              -> Tensor v'4 Int64

              row_pooling_sequence: row pooling sequence, form pooling region with - col_pooling_sequence.

              -> Tensor v'5 Int64

              col_pooling_sequence: column pooling sequence, form pooling region with - row_pooling sequence.

              -> Tensor Build t

              output: 4-D. Gradients w.r.t. the input of fractional_max_pool.

              fusedBatchNorm

              Arguments

              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
              => Tensor v'1 t

              x: A 4D Tensor for input data.

              -> Tensor v'2 t

              scale: A 1D Tensor for scaling factor, to scale the normalized x.

              -> Tensor v'3 t

              offset: A 1D Tensor for offset, to shift to the normalized x.

              -> Tensor v'4 t

              mean: A 1D Tensor for population mean. Used for inference only; - must be empty for training.

              -> Tensor v'5 t

              variance: A 1D Tensor for population variance. Used for inference only; - must be empty for training.

              -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

              (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)

              • y: A 4D Tensor for output data.
              • batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow + Benjamin Graham, Fractional Max-Pooling

                fractionalMaxPool' Source #

                Arguments

                :: OneOf '[Int32, Int64, Double, Float] t 
                => OpParams 
                -> Tensor v'1 t

                value: 4-D with shape `[batch, height, width, channels]`.

                -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

                (output, row_pooling_sequence, col_pooling_sequence)

                • output: output tensor after fractional max pooling.
                • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
                • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

                fractionalMaxPoolGrad Source #

                Arguments

                :: OneOf '[Int32, Int64, Double, Float] t 
                => Tensor v'1 t

                orig_input: Original input for fractional_max_pool

                -> Tensor v'2 t

                orig_output: Original output for fractional_max_pool

                -> Tensor v'3 t

                out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients + w.r.t. the output of fractional_max_pool.

                -> Tensor v'4 Int64

                row_pooling_sequence: row pooling sequence, form pooling region with + col_pooling_sequence.

                -> Tensor v'5 Int64

                col_pooling_sequence: column pooling sequence, form pooling region with + row_pooling sequence.

                -> Tensor Build t

                output: 4-D. Gradients w.r.t. the input of fractional_max_pool.

                Computes gradient of the FractionalMaxPool function.

                fractionalMaxPoolGrad' Source #

                Arguments

                :: OneOf '[Int32, Int64, Double, Float] t 
                => OpParams 
                -> Tensor v'1 t

                orig_input: Original input for fractional_max_pool

                -> Tensor v'2 t

                orig_output: Original output for fractional_max_pool

                -> Tensor v'3 t

                out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients + w.r.t. the output of fractional_max_pool.

                -> Tensor v'4 Int64

                row_pooling_sequence: row pooling sequence, form pooling region with + col_pooling_sequence.

                -> Tensor v'5 Int64

                col_pooling_sequence: column pooling sequence, form pooling region with + row_pooling sequence.

                -> Tensor Build t

                output: 4-D. Gradients w.r.t. the input of fractional_max_pool.

                fusedBatchNorm Source #

                Arguments

                :: OneOf '[Float] t 
                => Tensor v'1 t

                x: A 4D Tensor for input data.

                -> Tensor v'2 t

                scale: A 1D Tensor for scaling factor, to scale the normalized x.

                -> Tensor v'3 t

                offset: A 1D Tensor for offset, to shift to the normalized x.

                -> Tensor v'4 t

                mean: A 1D Tensor for population mean. Used for inference only; + must be empty for training.

                -> Tensor v'5 t

                variance: A 1D Tensor for population variance. Used for inference only; + must be empty for training.

                -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

                (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)

                • y: A 4D Tensor for output data.
                • batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow to compute the running mean.
                • batch_variance: A 1D Tensor for the computed batch variance, to be used by TensorFlow to compute the running variance.
                • reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation.
                • reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation.

                Batch normalization.

                Note that the size of 4D Tensors are defined by either NHWC or NCHW. - The size of 1D Tensors matches the dimension C of the 4D Tensors.

                fusedBatchNorm'

                Arguments

                :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                => OpParams 
                -> Tensor v'1 t

                x: A 4D Tensor for input data.

                -> Tensor v'2 t

                scale: A 1D Tensor for scaling factor, to scale the normalized x.

                -> Tensor v'3 t

                offset: A 1D Tensor for offset, to shift to the normalized x.

                -> Tensor v'4 t

                mean: A 1D Tensor for population mean. Used for inference only; - must be empty for training.

                -> Tensor v'5 t

                variance: A 1D Tensor for population variance. Used for inference only; - must be empty for training.

                -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

                (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)

                • y: A 4D Tensor for output data.
                • batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow + The size of 1D Tensors matches the dimension C of the 4D Tensors.

                  fusedBatchNorm' Source #

                  Arguments

                  :: OneOf '[Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  x: A 4D Tensor for input data.

                  -> Tensor v'2 t

                  scale: A 1D Tensor for scaling factor, to scale the normalized x.

                  -> Tensor v'3 t

                  offset: A 1D Tensor for offset, to shift to the normalized x.

                  -> Tensor v'4 t

                  mean: A 1D Tensor for population mean. Used for inference only; + must be empty for training.

                  -> Tensor v'5 t

                  variance: A 1D Tensor for population variance. Used for inference only; + must be empty for training.

                  -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

                  (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)

                  • y: A 4D Tensor for output data.
                  • batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow to compute the running mean.
                  • batch_variance: A 1D Tensor for the computed batch variance, to be used by TensorFlow to compute the running variance.
                  • reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation.
                  • reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance - in the cuDNN case), to be used in the gradient computation.

                  fusedBatchNormGrad

                  Arguments

                  :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => Tensor v'1 t

                  y_backprop: A 4D Tensor for the gradient with respect to y.

                  -> Tensor v'2 t

                  x: A 4D Tensor for input data.

                  -> Tensor v'3 t

                  scale: A 1D Tensor for scaling factor, to scale the normalized x.

                  -> Tensor v'4 t

                  reserve_space_1: A 1D Tensor for the computed batch mean, to be reused - in the gradient computation.

                  -> Tensor v'5 t

                  reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation.

                  -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

                  (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)

                  • x_backprop: A 4D Tensor for the gradient with respect to x.
                  • scale_backprop: A 1D Tensor for the gradient with respect to scale.
                  • offset_backprop: A 1D Tensor for the gradient with respect to offset.
                  • reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
                  • reserve_space_4: Unused placeholder to match the variance input + in the cuDNN case), to be used in the gradient computation.

                  fusedBatchNormGrad Source #

                  Arguments

                  :: OneOf '[Float] t 
                  => Tensor v'1 t

                  y_backprop: A 4D Tensor for the gradient with respect to y.

                  -> Tensor v'2 t

                  x: A 4D Tensor for input data.

                  -> Tensor v'3 t

                  scale: A 1D Tensor for scaling factor, to scale the normalized x.

                  -> Tensor v'4 t

                  reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation.

                  -> Tensor v'5 t

                  reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation.

                  -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

                  (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)

                  • x_backprop: A 4D Tensor for the gradient with respect to x.
                  • scale_backprop: A 1D Tensor for the gradient with respect to scale.
                  • offset_backprop: A 1D Tensor for the gradient with respect to offset.
                  • reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
                  • reserve_space_4: Unused placeholder to match the variance input in FusedBatchNorm.

                  Gradient for batch normalization.

                  Note that the size of 4D Tensors are defined by either NHWC or NCHW. The size of 1D Tensors matches the dimension C of the 4D Tensors.

                  fusedBatchNormGrad'

                  Arguments

                  :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  y_backprop: A 4D Tensor for the gradient with respect to y.

                  -> Tensor v'2 t

                  x: A 4D Tensor for input data.

                  -> Tensor v'3 t

                  scale: A 1D Tensor for scaling factor, to scale the normalized x.

                  -> Tensor v'4 t

                  reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation.

                  -> Tensor v'5 t

                  reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation.

                  -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

                  (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)

                  • x_backprop: A 4D Tensor for the gradient with respect to x.
                  • scale_backprop: A 1D Tensor for the gradient with respect to scale.
                  • offset_backprop: A 1D Tensor for the gradient with respect to offset.
                  • reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
                  • reserve_space_4: Unused placeholder to match the variance input in FusedBatchNorm.

                  fusedPadConv2D

                  Arguments

                  :: OneOf `[Word16, Double, Float]` t 
                  => Tensor v'1 t

                  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

                  -> Tensor v'2 Int32

                  paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

                  -> Tensor v'3 t

                  filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

                  -> Tensor Build t

                  output

                  Performs a padding as a preprocess during a convolution.

                  Similar to FusedResizeAndPadConv2d, this op allows for an optimized + The size of 1D Tensors matches the dimension C of the 4D Tensors.

                  fusedBatchNormGrad' Source #

                  Arguments

                  :: OneOf '[Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  y_backprop: A 4D Tensor for the gradient with respect to y.

                  -> Tensor v'2 t

                  x: A 4D Tensor for input data.

                  -> Tensor v'3 t

                  scale: A 1D Tensor for scaling factor, to scale the normalized x.

                  -> Tensor v'4 t

                  reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation.

                  -> Tensor v'5 t

                  reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation.

                  -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

                  (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)

                  • x_backprop: A 4D Tensor for the gradient with respect to x.
                  • scale_backprop: A 1D Tensor for the gradient with respect to scale.
                  • offset_backprop: A 1D Tensor for the gradient with respect to offset.
                  • reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
                  • reserve_space_4: Unused placeholder to match the variance input in FusedBatchNorm.

                  fusedPadConv2D Source #

                  Arguments

                  :: OneOf '[Float] t 
                  => Tensor v'1 t

                  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

                  -> Tensor v'2 Int32

                  paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

                  -> Tensor v'3 t

                  filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

                  -> Tensor Build t

                  output

                  Performs a padding as a preprocess during a convolution.

                  Similar to FusedResizeAndPadConv2d, this op allows for an optimized implementation where the spatial padding transformation stage is fused with the im2col lookup, but in this case without the bilinear filtering required for resizing. Fusing the padding prevents the need to write out the intermediate @@ -1219,12 +1321,12 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core order is used instead. Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this - operator is primarily an optimization to minimize memory usage.

                  fusedPadConv2D'

                  Arguments

                  :: OneOf `[Word16, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

                  -> Tensor v'2 Int32

                  paddings: A two-column matrix specifying the padding sizes. The number of - rows must be the same as the rank of input.

                  -> Tensor v'3 t

                  filter: 4-D with shape - `[filter_height, filter_width, in_channels, out_channels]`.

                  -> Tensor Build t

                  output

                  fusedResizeAndPadConv2D

                  Arguments

                  :: OneOf `[Word16, Double, Float]` t 
                  => Tensor v'1 t

                  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

                  -> Tensor v'2 Int32

                  size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

                  -> Tensor v'3 Int32

                  paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

                  -> Tensor v'4 t

                  filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

                  -> Tensor Build t

                  output

                  Performs a resize and padding as a preprocess during a convolution.

                  It's often possible to do spatial transformations more efficiently as part of + operator is primarily an optimization to minimize memory usage.

                  fusedPadConv2D' Source #

                  Arguments

                  :: OneOf '[Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

                  -> Tensor v'2 Int32

                  paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

                  -> Tensor v'3 t

                  filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

                  -> Tensor Build t

                  output

                  fusedResizeAndPadConv2D Source #

                  Arguments

                  :: OneOf '[Float] t 
                  => Tensor v'1 t

                  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

                  -> Tensor v'2 Int32

                  size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

                  -> Tensor v'3 Int32

                  paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

                  -> Tensor v'4 t

                  filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

                  -> Tensor Build t

                  output

                  Performs a resize and padding as a preprocess during a convolution.

                  It's often possible to do spatial transformations more efficiently as part of the packing stage of a convolution, so this op allows for an optimized implementation where these stages are fused together. This prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, @@ -1233,25 +1335,31 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core NHWC order. Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this - operator is primarily an optimization to minimize memory usage.

                  fusedResizeAndPadConv2D'

                  Arguments

                  :: OneOf `[Word16, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

                  -> Tensor v'2 Int32

                  size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

                  -> Tensor v'3 Int32

                  paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

                  -> Tensor v'4 t

                  filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

                  -> Tensor Build t

                  output

                  gather

                  Arguments

                  :: (TensorType tparams, OneOf `[Int32, Int64]` tindices) 
                  => Tensor v'1 tparams

                  params

                  -> Tensor v'2 tindices

                  indices

                  -> Tensor Build tparams

                  output

                  Gather slices from params according to indices.

                  indices must be an integer tensor of any dimension (usually 0-D or 1-D). + operator is primarily an optimization to minimize memory usage.

                  fusedResizeAndPadConv2D' Source #

                  Arguments

                  :: OneOf '[Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

                  -> Tensor v'2 Int32

                  size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

                  -> Tensor v'3 Int32

                  paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

                  -> Tensor v'4 t

                  filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

                  -> Tensor Build t

                  output

                  gather Source #

                  Arguments

                  :: (TensorType tparams, OneOf '[Int32, Int64] tindices) 
                  => Tensor v'1 tparams

                  params

                  -> Tensor v'2 tindices

                  indices

                  -> Tensor Build tparams

                  output

                  Gather slices from params according to indices.

                  indices must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

                  ```python # Scalar indices output[:, ..., :] = params[indices, :, ... :]

                  # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :]

                  # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] ```

                  If indices is a permutation and `len(indices) == params.shape[0]` then this operation will permute params accordingly.

                  style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/Gather.png" alt - /div

                  gather'

                  Arguments

                  :: (TensorType tparams, OneOf `[Int32, Int64]` tindices) 
                  => OpParams 
                  -> Tensor v'1 tparams

                  params

                  -> Tensor v'2 tindices

                  indices

                  -> Tensor Build tparams

                  output

                  gatherNd

                  Arguments

                  :: (TensorType tparams, OneOf `[Int32, Int64]` tindices) 
                  => Tensor v'1 tparams

                  params: `P-D`. The tensor from which to gather values.

                  -> Tensor v'2 tindices

                  indices: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`.

                  -> Tensor Build tparams

                  output: `(P+Q-K-1)-D`. Values from params gathered from indices given by indices.

                  Gather values or slices from params according to indices.

                  params is a Tensor of rank P and indices is a Tensor of rank Q.

                  indices must be integer tensor, containing indices into params. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

                  The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of params.

                  Produces an output tensor with shape

                  ``` [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]]. ```

                  Some examples below.

                  Simple indexing into a matrix:

                  ```python + this operation will permute params accordingly.

                  validate_indices: DEPRECATED. If this operation is assigned to CPU, values in indices are always validated to be within range. If assigned to GPU, out-of-bound indices result in safe but unspecified behavior, which may include raising an error.

                  style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt + /div

                  gather' Source #

                  Arguments

                  :: (TensorType tparams, OneOf '[Int32, Int64] tindices) 
                  => OpParams 
                  -> Tensor v'1 tparams

                  params

                  -> Tensor v'2 tindices

                  indices

                  -> Tensor Build tparams

                  output

                  gatherNd Source #

                  Arguments

                  :: (TensorType tparams, OneOf '[Int32, Int64] tindices) 
                  => Tensor v'1 tparams

                  params: The tensor from which to gather values.

                  -> Tensor v'2 tindices

                  indices: Index tensor.

                  -> Tensor Build tparams

                  output: Values from params gathered from indices given by indices, with shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.

                  Gather slices from params into a Tensor with shape specified by indices.

                  indices is a K-dimensional integer tensor, best thought of as a (K-1)-dimensional tensor of indices into params, where each element defines a slice of params:

                  output[i_0, ..., i_{K-2}] = params[indices[i0, ..., i_{K-2}]]

                  Whereas in @{tf.gather} indices defines slices into the first dimension of params, in `tf.gather_nd`, indices defines slices into the first N dimensions of params, where `N = indices.shape[-1]`.

                  The last dimension of indices can be at most the rank of params:

                  indices.shape[-1] <= params.rank

                  The last dimension of indices corresponds to elements (if `indices.shape[-1] == params.rank`) or slices (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` of params. The output tensor has shape

                  indices.shape[:-1] + params.shape[indices.shape[-1]:]

                  Some examples below.

                  Simple indexing into a matrix:

                  ```python indices = [[0, 0], [1, 1]] params = [[a, b], [c, d]] output = [a, d] @@ -1291,55 +1399,122 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core params = [[[a0, b0], [c0, d0]], [[a1, b1], [c1, d1]]] output = [[b0, b1], [d0, c1]] - ```

                  gatherNd'

                  Arguments

                  :: (TensorType tparams, OneOf `[Int32, Int64]` tindices) 
                  => OpParams 
                  -> Tensor v'1 tparams

                  params: `P-D`. The tensor from which to gather values.

                  -> Tensor v'2 tindices

                  indices: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`.

                  -> Tensor Build tparams

                  output: `(P+Q-K-1)-D`. Values from params gathered from indices given by - indices.

                  getSessionHandle

                  Arguments

                  :: TensorType t 
                  => Tensor v'1 t

                  value: The tensor to be stored.

                  -> Tensor Build ByteString

                  handle: The handle for the tensor stored in the session state.

                  Store the input tensor in the state of the current session.

                  getSessionHandle'

                  Arguments

                  :: TensorType t 
                  => OpParams 
                  -> Tensor v'1 t

                  value: The tensor to be stored.

                  -> Tensor Build ByteString

                  handle: The handle for the tensor stored in the session state.

                  getSessionTensor

                  Arguments

                  :: TensorType dtype 
                  => Tensor v'1 ByteString

                  handle: The handle for a tensor stored in the session state.

                  -> Tensor Build dtype

                  value: The tensor for the given handle.

                  Get the value of the tensor specified by its handle.

                  getSessionTensor'

                  Arguments

                  :: TensorType dtype 
                  => OpParams 
                  -> Tensor v'1 ByteString

                  handle: The handle for a tensor stored in the session state.

                  -> Tensor Build dtype

                  value: The tensor for the given handle.

                  greater

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  Returns the truth value of (x > y) element-wise.

                  • NOTE*: Greater supports broadcasting. More about broadcasting here

                  greater'

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  greaterEqual

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  Returns the truth value of (x >= y) element-wise.

                  • NOTE*: GreaterEqual supports broadcasting. More about broadcasting here

                  greaterEqual'

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  hSVToRGB

                  Arguments

                  :: OneOf `[Double, Float]` t 
                  => Tensor v'1 t

                  images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.

                  -> Tensor Build t

                  output: images converted to RGB.

                  Convert one or more images from HSV to RGB.

                  Outputs a tensor of the same shape as the images tensor, containing the RGB + ```

                  gatherNd' Source #

                  Arguments

                  :: (TensorType tparams, OneOf '[Int32, Int64] tindices) 
                  => OpParams 
                  -> Tensor v'1 tparams

                  params: The tensor from which to gather values.

                  -> Tensor v'2 tindices

                  indices: Index tensor.

                  -> Tensor Build tparams

                  output: Values from params gathered from indices given by indices, with shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.

                  gatherV2 Source #

                  Arguments

                  :: (TensorType tparams, OneOf '[Int32, Int64] tindices, OneOf '[Int32, Int64] taxis) 
                  => Tensor v'1 tparams

                  params: The tensor from which to gather values. Must be at least rank `axis + 1`.

                  -> Tensor v'2 tindices

                  indices: Index tensor. Must be in range `[0, params.shape[axis])`.

                  -> Tensor v'3 taxis

                  axis: The axis in params to gather indices from. Defaults to the first dimension. Supports negative indexes.

                  -> Tensor Build tparams

                  output: Values from params gathered from indices given by indices, with shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.

                  Gather slices from params axis axis according to indices.

                  indices must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]` where:

                  ```python # Scalar indices (output is rank(params) - 1). output[a_0, ..., a_n, b_0, ..., b_n] = params[a_0, ..., a_n, indices, b_0, ..., b_n]

                  # Vector indices (output is rank(params)). output[a_0, ..., a_n, i, b_0, ..., b_n] = params[a_0, ..., a_n, indices[i], b_0, ..., b_n]

                  # Higher rank indices (output is rank(params) + rank(indices) - 1). output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] ```

                  style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt + /div

                  gatherV2' Source #

                  Arguments

                  :: (TensorType tparams, OneOf '[Int32, Int64] tindices, OneOf '[Int32, Int64] taxis) 
                  => OpParams 
                  -> Tensor v'1 tparams

                  params: The tensor from which to gather values. Must be at least rank `axis + 1`.

                  -> Tensor v'2 tindices

                  indices: Index tensor. Must be in range `[0, params.shape[axis])`.

                  -> Tensor v'3 taxis

                  axis: The axis in params to gather indices from. Defaults to the first dimension. Supports negative indexes.

                  -> Tensor Build tparams

                  output: Values from params gathered from indices given by indices, with shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.

                  getSessionHandle Source #

                  Arguments

                  :: TensorType t 
                  => Tensor v'1 t

                  value: The tensor to be stored.

                  -> Tensor Build ByteString

                  handle: The handle for the tensor stored in the session state, represented as a string.

                  Store the input tensor in the state of the current session.

                  getSessionHandle' Source #

                  Arguments

                  :: TensorType t 
                  => OpParams 
                  -> Tensor v'1 t

                  value: The tensor to be stored.

                  -> Tensor Build ByteString

                  handle: The handle for the tensor stored in the session state, represented as a string.

                  getSessionHandleV2 Source #

                  Arguments

                  :: (MonadBuild m', TensorType t) 
                  => Tensor v'1 t

                  value: The tensor to be stored.

                  -> m' (Tensor Value ResourceHandle)

                  handle: The handle for the tensor stored in the session state, represented as a ResourceHandle object.

                  Store the input tensor in the state of the current session.

                  getSessionHandleV2' Source #

                  Arguments

                  :: (MonadBuild m', TensorType t) 
                  => OpParams 
                  -> Tensor v'1 t

                  value: The tensor to be stored.

                  -> m' (Tensor Value ResourceHandle)

                  handle: The handle for the tensor stored in the session state, represented as a ResourceHandle object.

                  getSessionTensor Source #

                  Arguments

                  :: TensorType dtype 
                  => Tensor v'1 ByteString

                  handle: The handle for a tensor stored in the session state.

                  -> Tensor Build dtype

                  value: The tensor for the given handle.

                  Get the value of the tensor specified by its handle.

                  getSessionTensor' Source #

                  Arguments

                  :: TensorType dtype 
                  => OpParams 
                  -> Tensor v'1 ByteString

                  handle: The handle for a tensor stored in the session state.

                  -> Tensor Build dtype

                  value: The tensor for the given handle.

                  greater Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  Returns the truth value of (x > y) element-wise.

                  • NOTE*: Greater supports broadcasting. More about broadcasting here

                  greater' Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  greaterEqual Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  Returns the truth value of (x >= y) element-wise.

                  • NOTE*: GreaterEqual supports broadcasting. More about broadcasting here

                  hSVToRGB Source #

                  Arguments

                  :: OneOf '[Double, Float] t 
                  => Tensor v'1 t

                  images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.

                  -> Tensor Build t

                  output: images converted to RGB.

                  Convert one or more images from HSV to RGB.

                  Outputs a tensor of the same shape as the images tensor, containing the RGB value of the pixels. The output is only well defined if the value in images - are in `[0,1]`.

                  See rgb_to_hsv for a description of the HSV encoding.

                  hSVToRGB'

                  Arguments

                  :: OneOf `[Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.

                  -> Tensor Build t

                  output: images converted to RGB.

                  hashTable

                  Arguments

                  :: MonadBuild m' 
                  => DataType

                  key_dtype: Type of the table keys.

                  -> DataType

                  value_dtype: Type of the table values.

                  -> m' (Tensor Ref ByteString)

                  table_handle: Handle to a table.

                  Creates a non-initialized hash table.

                  This op creates a hash table, specifying the type of its keys and values. + are in `[0,1]`.

                  See rgb_to_hsv for a description of the HSV encoding.

                  hSVToRGB' Source #

                  Arguments

                  :: OneOf '[Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.

                  -> Tensor Build t

                  output: images converted to RGB.

                  hashTable Source #

                  Arguments

                  :: MonadBuild m' 
                  => DataType

                  key_dtype: Type of the table keys.

                  -> DataType

                  value_dtype: Type of the table values.

                  -> m' (Tensor Ref ByteString)

                  table_handle: Handle to a table.

                  Creates a non-initialized hash table.

                  This op creates a hash table, specifying the type of its keys and values. Before using the table you will have to initialize it. After initialization the - table will be immutable.

                  hashTable'

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> DataType

                  key_dtype: Type of the table keys.

                  -> DataType

                  value_dtype: Type of the table values.

                  -> m' (Tensor Ref ByteString)

                  table_handle: Handle to a table.

                  histogramSummary

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => Tensor v'1 ByteString

                  tag: Scalar. Tag to use for the Value.

                  -> Tensor v'2 t

                  values: Any shape. Values to use to build the histogram.

                  -> Tensor Build ByteString

                  summary: Scalar. Serialized Summary protocol buffer.

                  Outputs a Summary protocol buffer with a histogram.

                  The generated + table will be immutable.

                  hashTable' Source #

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> DataType

                  key_dtype: Type of the table keys.

                  -> DataType

                  value_dtype: Type of the table values.

                  -> m' (Tensor Ref ByteString)

                  table_handle: Handle to a table.

                  hashTableV2 Source #

                  Arguments

                  :: MonadBuild m' 
                  => DataType

                  key_dtype: Type of the table keys.

                  -> DataType

                  value_dtype: Type of the table values.

                  -> m' (Tensor Value ResourceHandle)

                  table_handle: Handle to a table.

                  Creates a non-initialized hash table.

                  This op creates a hash table, specifying the type of its keys and values. + Before using the table you will have to initialize it. After initialization the + table will be immutable.

                  hashTableV2' Source #

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> DataType

                  key_dtype: Type of the table keys.

                  -> DataType

                  value_dtype: Type of the table values.

                  -> m' (Tensor Value ResourceHandle)

                  table_handle: Handle to a table.

                  histogramSummary Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => Tensor v'1 ByteString

                  tag: Scalar. Tag to use for the Value.

                  -> Tensor v'2 t

                  values: Any shape. Values to use to build the histogram.

                  -> Tensor Build ByteString

                  summary: Scalar. Serialized Summary protocol buffer.

                  Outputs a Summary protocol buffer with a histogram.

                  The generated `Summary` - has one summary value containing a histogram for values.

                  This op reports an InvalidArgument error if any value is not finite.

                  histogramSummary'

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 ByteString

                  tag: Scalar. Tag to use for the Value.

                  -> Tensor v'2 t

                  values: Any shape. Values to use to build the histogram.

                  -> Tensor Build ByteString

                  summary: Scalar. Serialized Summary protocol buffer.

                  iFFT

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most - dimension of input is replaced with its inverse 1D Fourier Transform.

                  Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most

                  dimension of input.

                  iFFT'

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most - dimension of input is replaced with its inverse 1D Fourier Transform.

                  iFFT2D

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most 2 - dimensions of input are replaced with their inverse 2D Fourier Transform.

                  compatibility(numpy) - Equivalent to np.ifft2 - end_compatibility

                  Compute the inverse 2-dimensional discrete Fourier Transform over the inner-most

                  2 dimensions of input.

                  iFFT2D'

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most 2 - dimensions of input are replaced with their inverse 2D Fourier Transform.

                  compatibility(numpy) - Equivalent to np.ifft2 - end_compatibility

                  iFFT3D

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most 3 - dimensions of input are replaced with their inverse 3D Fourier Transform.

                  compatibility(numpy) - Equivalent to np.fft3 - end_compatibility

                  Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most

                  3 dimensions of input.

                  iFFT3D'

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most 3 - dimensions of input are replaced with their inverse 3D Fourier Transform.

                  compatibility(numpy) - Equivalent to np.fft3 - end_compatibility

                  identity

                  Arguments

                  :: TensorType t 
                  => Tensor v'1 t

                  input

                  -> Tensor Build t

                  output

                  Return a tensor with the same shape and contents as the input tensor or value.

                  identity'

                  Arguments

                  :: TensorType t 
                  => OpParams 
                  -> Tensor v'1 t

                  input

                  -> Tensor Build t

                  output

                  identityReader

                  Arguments

                  :: MonadBuild m' 
                  => m' (Tensor Ref ByteString)

                  reader_handle: The handle to reference the Reader.

                  A Reader that outputs the queued work as both the key and value.

                  To use, enqueue strings in a Queue. ReaderRead will take the front - work string and output (work, work).

                  identityReader'

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> m' (Tensor Ref ByteString)

                  reader_handle: The handle to reference the Reader.

                  identityReaderV2

                  Arguments

                  :: MonadBuild m' 
                  => m' ResourceHandle

                  reader_handle: The handle to reference the Reader.

                  A Reader that outputs the queued work as both the key and value.

                  To use, enqueue strings in a Queue. ReaderRead will take the front - work string and output (work, work).

                  identityReaderV2'

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> m' ResourceHandle

                  reader_handle: The handle to reference the Reader.

                  igamma

                  Arguments

                  :: OneOf `[Double, Float]` t 
                  => Tensor v'1 t

                  a

                  -> Tensor v'2 t

                  x

                  -> Tensor Build t

                  z

                  Compute the lower regularized incomplete Gamma function `Q(a, x)`.

                  The lower regularized incomplete Gamma function is defined as:

                  ``` - P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) - ``` - where - ``` - gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt - ``` - is the lower incomplete Gamma function.

                  Note, above `Q(a, x)` (Igammac) is the upper regularized complete - Gamma function.

                  igamma'

                  Arguments

                  :: OneOf `[Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  a

                  -> Tensor v'2 t

                  x

                  -> Tensor Build t

                  z

                  igammac

                  Arguments

                  :: OneOf `[Double, Float]` t 
                  => Tensor v'1 t

                  a

                  -> Tensor v'2 t

                  x

                  -> Tensor Build t

                  z

                  Compute the upper regularized incomplete Gamma function `Q(a, x)`.

                  The upper regularized incomplete Gamma function is defined as:

                  ``` - Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) - ``` - where - ``` - Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt - ``` - is the upper incomplete Gama function.

                  Note, above `P(a, x)` (Igamma) is the lower regularized complete - Gamma function.

                  igammac'

                  Arguments

                  :: OneOf `[Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  a

                  -> Tensor v'2 t

                  x

                  -> Tensor Build t

                  z

                  imag

                  Arguments

                  :: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
                  => Tensor v'1 t

                  input

                  -> Tensor Build tout

                  output

                  Returns the imaginary part of a complex number.

                  Given a tensor input of complex numbers, this operation returns a tensor of + has one summary value containing a histogram for values.

                  This op reports an InvalidArgument error if any value is not finite.

                  histogramSummary' Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 ByteString

                  tag: Scalar. Tag to use for the Value.

                  -> Tensor v'2 t

                  values: Any shape. Values to use to build the histogram.

                  -> Tensor Build ByteString

                  summary: Scalar. Serialized Summary protocol buffer.

                  iFFT Source #

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most + dimension of input is replaced with its inverse 1D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.ifft + end_compatibility

                  Inverse fast Fourier transform.

                  Computes the inverse 1-dimensional discrete Fourier transform over the + inner-most dimension of input.

                  iFFT' Source #

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most + dimension of input is replaced with its inverse 1D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.ifft + end_compatibility

                  iFFT2D Source #

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most 2 + dimensions of input are replaced with their inverse 2D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.ifft2 + end_compatibility

                  Inverse 2D fast Fourier transform.

                  Computes the inverse 2-dimensional discrete Fourier transform over the + inner-most 2 dimensions of input.

                  iFFT2D' Source #

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most 2 + dimensions of input are replaced with their inverse 2D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.ifft2 + end_compatibility

                  iFFT3D Source #

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most 3 + dimensions of input are replaced with their inverse 3D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.ifftn with 3 dimensions. + end_compatibility

                  Inverse 3D fast Fourier transform.

                  Computes the inverse 3-dimensional discrete Fourier transform over the + inner-most 3 dimensions of input.

                  iFFT3D' Source #

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor Build (Complex Float)

                  output: A complex64 tensor of the same shape as input. The inner-most 3 + dimensions of input are replaced with their inverse 3D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.ifftn with 3 dimensions. + end_compatibility

                  iRFFT Source #

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor v'2 Int32

                  fft_length: An int32 tensor of shape [1]. The FFT length.

                  -> Tensor Build Float

                  output: A float32 tensor of the same rank as input. The inner-most + dimension of input is replaced with the fft_length samples of its inverse + 1D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.irfft + end_compatibility

                  Inverse real-valued fast Fourier transform.

                  Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + signal over the inner-most dimension of input.

                  The inner-most dimension of input is assumed to be the result of RFFT: the + `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + fft_length is not provided, it is computed from the size of the inner-most + dimension of input (`fft_length = 2 * (inner - 1)`). If the FFT length used to + compute input is odd, it should be provided since it cannot be inferred + properly.

                  Along the axis IRFFT is computed on, if `fft_length / 2 + 1` is smaller + than the corresponding dimension of input, the dimension is cropped. If it is + larger, the dimension is padded with zeros.

                  iRFFT' Source #

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor v'2 Int32

                  fft_length: An int32 tensor of shape [1]. The FFT length.

                  -> Tensor Build Float

                  output: A float32 tensor of the same rank as input. The inner-most + dimension of input is replaced with the fft_length samples of its inverse + 1D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.irfft + end_compatibility

                  iRFFT2D Source #

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor v'2 Int32

                  fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.

                  -> Tensor Build Float

                  output: A float32 tensor of the same rank as input. The inner-most 2 + dimensions of input are replaced with the fft_length samples of their + inverse 2D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.irfft2 + end_compatibility

                  Inverse 2D real-valued fast Fourier transform.

                  Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + signal over the inner-most 2 dimensions of input.

                  The inner-most 2 dimensions of input are assumed to be the result of RFFT2D: + The inner-most dimension contains the `fft_length / 2 + 1` unique components of + the DFT of a real-valued signal. If fft_length is not provided, it is computed + from the size of the inner-most 2 dimensions of input. If the FFT length used + to compute input is odd, it should be provided since it cannot be inferred + properly.

                  Along each axis IRFFT2D is computed on, if fft_length (or + `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + corresponding dimension of input, the dimension is cropped. If it is larger, + the dimension is padded with zeros.

                  iRFFT2D' Source #

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor v'2 Int32

                  fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.

                  -> Tensor Build Float

                  output: A float32 tensor of the same rank as input. The inner-most 2 + dimensions of input are replaced with the fft_length samples of their + inverse 2D Fourier transform.

                  compatibility(numpy) + Equivalent to np.fft.irfft2 + end_compatibility

                  iRFFT3D Source #

                  Arguments

                  :: Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor v'2 Int32

                  fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.

                  -> Tensor Build Float

                  output: A float32 tensor of the same rank as input. The inner-most 3 + dimensions of input are replaced with the fft_length samples of their + inverse 3D real Fourier transform.

                  compatibility(numpy) + Equivalent to np.irfftn with 3 dimensions. + end_compatibility

                  Inverse 3D real-valued fast Fourier transform.

                  Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + signal over the inner-most 3 dimensions of input.

                  The inner-most 3 dimensions of input are assumed to be the result of RFFT3D: + The inner-most dimension contains the `fft_length / 2 + 1` unique components of + the DFT of a real-valued signal. If fft_length is not provided, it is computed + from the size of the inner-most 3 dimensions of input. If the FFT length used + to compute input is odd, it should be provided since it cannot be inferred + properly.

                  Along each axis IRFFT3D is computed on, if fft_length (or + `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + corresponding dimension of input, the dimension is cropped. If it is larger, + the dimension is padded with zeros.

                  iRFFT3D' Source #

                  Arguments

                  :: OpParams 
                  -> Tensor v'1 (Complex Float)

                  input: A complex64 tensor.

                  -> Tensor v'2 Int32

                  fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.

                  -> Tensor Build Float

                  output: A float32 tensor of the same rank as input. The inner-most 3 + dimensions of input are replaced with the fft_length samples of their + inverse 3D real Fourier transform.

                  compatibility(numpy) + Equivalent to np.irfftn with 3 dimensions. + end_compatibility

                  identity Source #

                  Arguments

                  :: TensorType t 
                  => Tensor v'1 t

                  input

                  -> Tensor Build t

                  output

                  Return a tensor with the same shape and contents as the input tensor or value.

                  identity' Source #

                  Arguments

                  :: TensorType t 
                  => OpParams 
                  -> Tensor v'1 t

                  input

                  -> Tensor Build t

                  output

                  identityReader Source #

                  Arguments

                  :: MonadBuild m' 
                  => m' (Tensor Ref ByteString)

                  reader_handle: The handle to reference the Reader.

                  A Reader that outputs the queued work as both the key and value.

                  To use, enqueue strings in a Queue. ReaderRead will take the front + work string and output (work, work).

                  identityReader' Source #

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> m' (Tensor Ref ByteString)

                  reader_handle: The handle to reference the Reader.

                  identityReaderV2 Source #

                  Arguments

                  :: MonadBuild m' 
                  => m' (Tensor Value ResourceHandle)

                  reader_handle: The handle to reference the Reader.

                  A Reader that outputs the queued work as both the key and value.

                  To use, enqueue strings in a Queue. ReaderRead will take the front + work string and output (work, work).

                  identityReaderV2' Source #

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> m' (Tensor Value ResourceHandle)

                  reader_handle: The handle to reference the Reader.

                  igamma Source #

                  Arguments

                  :: OneOf '[Double, Float] t 
                  => Tensor v'1 t

                  a

                  -> Tensor v'2 t

                  x

                  -> Tensor Build t

                  z

                  Compute the lower regularized incomplete Gamma function `Q(a, x)`.

                  The lower regularized incomplete Gamma function is defined as:

                  \(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\)

                  where

                  \(gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt\)

                  is the lower incomplete Gamma function.

                  Note, above `Q(a, x)` (Igammac) is the upper regularized complete + Gamma function.

                  igamma' Source #

                  Arguments

                  :: OneOf '[Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  a

                  -> Tensor v'2 t

                  x

                  -> Tensor Build t

                  z

                  igammac Source #

                  Arguments

                  :: OneOf '[Double, Float] t 
                  => Tensor v'1 t

                  a

                  -> Tensor v'2 t

                  x

                  -> Tensor Build t

                  z

                  Compute the upper regularized incomplete Gamma function `Q(a, x)`.

                  The upper regularized incomplete Gamma function is defined as:

                  \(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\)

                  where

                  \(Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt\)

                  is the upper incomplete Gama function.

                  Note, above `P(a, x)` (Igamma) is the lower regularized complete + Gamma function.

                  igammac' Source #

                  Arguments

                  :: OneOf '[Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  a

                  -> Tensor v'2 t

                  x

                  -> Tensor Build t

                  z

                  ignoreErrorsDataset Source #

                  Arguments

                  :: MonadBuild m' 
                  => [DataType]

                  output_types

                  -> Tensor v'1 ResourceHandle

                  input_dataset

                  -> m' (Tensor Value ResourceHandle)

                  handle

                  Creates a dataset that contains the elements of input_dataset ignoring errors.

                  ignoreErrorsDataset' Source #

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> [DataType]

                  output_types

                  -> Tensor v'1 ResourceHandle

                  input_dataset

                  -> m' (Tensor Value ResourceHandle)

                  handle

                  imag Source #

                  Arguments

                  :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) 
                  => Tensor v'1 t

                  input

                  -> Tensor Build tout

                  output

                  Returns the imaginary part of a complex number.

                  Given a tensor input of complex numbers, this operation returns a tensor of type float that is the imaginary part of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where *a* is the real part and *b* is the imaginary part returned by this operation.

                  For example:

                  ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] tf.imag(input) ==> [4.75, 5.75] - ```

                  imag'

                  Arguments

                  :: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
                  => OpParams 
                  -> Tensor v'1 t

                  input

                  -> Tensor Build tout

                  output

                  imageSummary

                  Arguments

                  :: OneOf `[Word16, Word8, Float]` t 
                  => Tensor v'1 ByteString

                  tag: Scalar. Used to build the tag attribute of the summary values.

                  -> Tensor v'2 t

                  tensor: 4-D of shape `[batch_size, height, width, channels]` where - channels is 1, 3, or 4.

                  -> Tensor Build ByteString

                  summary: Scalar. Serialized Summary protocol buffer.

                  Outputs a Summary protocol buffer with images.

                  The summary has up to max_images summary values containing images. The + ```

                  imag' Source #

                  Arguments

                  :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) 
                  => OpParams 
                  -> Tensor v'1 t

                  input

                  -> Tensor Build tout

                  output

                  imageSummary Source #

                  Arguments

                  :: OneOf '[Word16, Word8, Float] t 
                  => Tensor v'1 ByteString

                  tag: Scalar. Used to build the tag attribute of the summary values.

                  -> Tensor v'2 t

                  tensor: 4-D of shape `[batch_size, height, width, channels]` where + channels is 1, 3, or 4.

                  -> Tensor Build ByteString

                  summary: Scalar. Serialized Summary protocol buffer.

                  Outputs a Summary protocol buffer with images.

                  The summary has up to max_images summary values containing images. The images are built from tensor which must be 4-D with shape `[batch_size, height, width, channels]` and where channels can be:

                  • 1: tensor is interpreted as Grayscale.
                  • 3: tensor is interpreted as RGB.
                  • 4: tensor is interpreted as RGBA.

                  The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range @@ -1347,51 +1522,54 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core normalization algorithms:

                  • If the input values are all positive, they are rescaled so the largest one is 255.
                  • If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, - or the largest one is 255.

                  The tag argument is a scalar Tensor of type string. It is used to + or the largest one is 255.

                The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

                • If max_images is 1, the summary value tag is '*tag*/image'.
                • If max_images is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.

                The bad_color argument is the color to use in the generated images for non-finite input values. It is a unit8 1-D tensor of length channels. Each element must be in the range `[0, 255]` (It represents the value of a pixel in the output image). Non-finite values in the input tensor are replaced by this tensor in the output image. The default value is the color - red.

                imageSummary'

                Arguments

                :: OneOf `[Word16, Word8, Float]` t 
                => OpParams 
                -> Tensor v'1 ByteString

                tag: Scalar. Used to build the tag attribute of the summary values.

                -> Tensor v'2 t

                tensor: 4-D of shape `[batch_size, height, width, channels]` where - channels is 1, 3, or 4.

                -> Tensor Build ByteString

                summary: Scalar. Serialized Summary protocol buffer.

                immutableConst

                Arguments

                :: TensorType dtype 
                => Shape

                shape: Shape of the returned tensor.

                -> Tensor Build dtype

                tensor

                Returns immutable tensor from memory region.

                The current implementation memmaps the tensor from a file.

                immutableConst'

                Arguments

                :: TensorType dtype 
                => OpParams 
                -> Shape

                shape: Shape of the returned tensor.

                -> Tensor Build dtype

                tensor

                inTopK

                Arguments

                :: OneOf `[Int32, Int64]` t 
                => Int64

                k: Number of top elements to look at for computing precision.

                -> Tensor v'1 Float

                predictions: A batch_size x classes tensor.

                -> Tensor v'2 t

                targets: A batch_size vector of class ids.

                -> Tensor Build Bool

                precision: Computed Precision at k as a `bool Tensor`.

                Says whether the targets are in the top K predictions.

                This outputs a batch_size bool array, an entry `out[i]` is true if the - prediction for the target class is among the top k predictions among - all predictions for example i. Note that the behavior of InTopK differs - from the TopK op in its handling of ties; if multiple classes have the - same prediction value and straddle the top-k boundary, all of those - classes are considered to be in the top k.

                More formally, let

                \(predictions_i\) be the predictions for all classes for example i, - \(targets_i\) be the target class for example i, - \(out_i\) be the output for example i,

                $$out_i = predictions_{i, targets_i} in TopKIncludingTies(predictions_i)$$

                inTopK'

                Arguments

                :: OneOf `[Int32, Int64]` t 
                => OpParams 
                -> Int64

                k: Number of top elements to look at for computing precision.

                -> Tensor v'1 Float

                predictions: A batch_size x classes tensor.

                -> Tensor v'2 t

                targets: A batch_size vector of class ids.

                -> Tensor Build Bool

                precision: Computed Precision at k as a `bool Tensor`.

                initializeTable

                Arguments

                :: (MonadBuild m', TensorType tkey, TensorType tval) 
                => Tensor Ref ByteString

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 tkey

                keys: Keys of type Tkey.

                -> Tensor v'3 tval

                values: Values of type Tval.

                -> m' ControlNode 

                Table initializer that takes two tensors for keys and values respectively.

                initializeTable'

                Arguments

                :: (MonadBuild m', TensorType tkey, TensorType tval) 
                => OpParams 
                -> Tensor Ref ByteString

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 tkey

                keys: Keys of type Tkey.

                -> Tensor v'3 tval

                values: Values of type Tval.

                -> m' ControlNode 

                initializeTableFromTextFile

                Arguments

                :: MonadBuild m' 
                => Int64

                key_index: Column index in a line to get the table key values from.

                -> Int64

                value_index: Column index that represents information of a line to get the table - value values from.

                -> Tensor Ref ByteString

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 ByteString

                filename: Filename of a vocabulary text file.

                -> m' ControlNode 

                Initializes a table from a text file.

                It inserts one key-value pair into the table for each line of the file. + red.

                imageSummary' Source #

                Arguments

                :: OneOf '[Word16, Word8, Float] t 
                => OpParams 
                -> Tensor v'1 ByteString

                tag: Scalar. Used to build the tag attribute of the summary values.

                -> Tensor v'2 t

                tensor: 4-D of shape `[batch_size, height, width, channels]` where + channels is 1, 3, or 4.

                -> Tensor Build ByteString

                summary: Scalar. Serialized Summary protocol buffer.

                immutableConst Source #

                Arguments

                :: TensorType dtype 
                => Shape

                shape: Shape of the returned tensor.

                -> Tensor Build dtype

                tensor

                Returns immutable tensor from memory region.

                The current implementation memmaps the tensor from a file.

                immutableConst' Source #

                Arguments

                :: TensorType dtype 
                => OpParams 
                -> Shape

                shape: Shape of the returned tensor.

                -> Tensor Build dtype

                tensor

                predictions_{i, targets_i} in TopKIncludingTies(predictions_i)$$

                inTopK Source #

                Arguments

                :: OneOf '[Int32, Int64] t 
                => Int64

                k: Number of top elements to look at for computing precision.

                -> Tensor v'1 Float

                predictions: A batch_size x classes tensor.

                -> Tensor v'2 t

                targets: A batch_size vector of class ids.

                -> Tensor Build Bool

                precision: Computed Precision at k as a `bool Tensor`.

                inTopK' Source #

                Arguments

                :: OneOf '[Int32, Int64] t 
                => OpParams 
                -> Int64

                k: Number of top elements to look at for computing precision.

                -> Tensor v'1 Float

                predictions: A batch_size x classes tensor.

                -> Tensor v'2 t

                targets: A batch_size vector of class ids.

                -> Tensor Build Bool

                precision: Computed Precision at k as a `bool Tensor`.

                initializeTable Source #

                Arguments

                :: (MonadBuild m', TensorType tkey, TensorType tval) 
                => Tensor Ref ByteString

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 tkey

                keys: Keys of type Tkey.

                -> Tensor v'3 tval

                values: Values of type Tval.

                -> m' ControlNode 

                Table initializer that takes two tensors for keys and values respectively.

                initializeTable' Source #

                Arguments

                :: (MonadBuild m', TensorType tkey, TensorType tval) 
                => OpParams 
                -> Tensor Ref ByteString

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 tkey

                keys: Keys of type Tkey.

                -> Tensor v'3 tval

                values: Values of type Tval.

                -> m' ControlNode 

                initializeTableFromTextFile Source #

                Arguments

                :: MonadBuild m' 
                => Int64

                key_index: Column index in a line to get the table key values from.

                -> Int64

                value_index: Column index that represents information of a line to get the table + value values from.

                -> Tensor Ref ByteString

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 ByteString

                filename: Filename of a vocabulary text file.

                -> m' ControlNode 

                Initializes a table from a text file.

                It inserts one key-value pair into the table for each line of the file. The key and value is extracted from the whole line content, elements from the split line based on delimiter or the line number (starting from zero). Where to extract the key and value from a line is specified by key_index and value_index.

                • A value of -1 means use the line number(starting from zero), expects int64.
                • A value of -2 means use the whole line content, expects string.
                • A value >= 0 means use the index (starting at zero) of the split line based - on delimiter.

                initializeTableFromTextFile'

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> Int64

                key_index: Column index in a line to get the table key values from.

                -> Int64

                value_index: Column index that represents information of a line to get the table - value values from.

                -> Tensor Ref ByteString

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 ByteString

                filename: Filename of a vocabulary text file.

                -> m' ControlNode 

                inv

                Arguments

                :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                => Tensor v'1 t

                x

                -> Tensor Build t

                y

                Computes the reciprocal of x element-wise.

                I.e., \(y = 1 / x\).

                invGrad

                Arguments

                :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                => Tensor v'1 t

                x

                -> Tensor v'2 t

                y

                -> Tensor Build t

                z

                Computes the gradient for the inverse of x wrt its input.

                Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy - is the corresponding input gradient.

                invGrad'

                Arguments

                :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor v'2 t

                y

                -> Tensor Build t

                z

                invertPermutation

                Arguments

                :: OneOf `[Int32, Int64]` t 
                => Tensor v'1 t

                x: 1-D.

                -> Tensor Build t

                y: 1-D.

                Computes the inverse permutation of a tensor.

                This operation computes the inverse of an index permutation. It takes a 1-D + on delimiter.

                initializeTableFromTextFile' Source #

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> Int64

                key_index: Column index in a line to get the table key values from.

                -> Int64

                value_index: Column index that represents information of a line to get the table + value values from.

                -> Tensor Ref ByteString

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 ByteString

                filename: Filename of a vocabulary text file.

                -> m' ControlNode 

                initializeTableFromTextFileV2 Source #

                Arguments

                :: MonadBuild m' 
                => Int64

                key_index: Column index in a line to get the table key values from.

                -> Int64

                value_index: Column index that represents information of a line to get the table + value values from.

                -> Tensor v'1 ResourceHandle

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 ByteString

                filename: Filename of a vocabulary text file.

                -> m' ControlNode 

                Initializes a table from a text file.

                It inserts one key-value pair into the table for each line of the file. + The key and value is extracted from the whole line content, elements from the + split line based on delimiter or the line number (starting from zero). + Where to extract the key and value from a line is specified by key_index and + value_index.

                • A value of -1 means use the line number(starting from zero), expects int64.
                • A value of -2 means use the whole line content, expects string.
                • A value >= 0 means use the index (starting at zero) of the split line based + on delimiter.

                initializeTableFromTextFileV2' Source #

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> Int64

                key_index: Column index in a line to get the table key values from.

                -> Int64

                value_index: Column index that represents information of a line to get the table + value values from.

                -> Tensor v'1 ResourceHandle

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 ByteString

                filename: Filename of a vocabulary text file.

                -> m' ControlNode 

                initializeTableV2 Source #

                Arguments

                :: (MonadBuild m', TensorType tkey, TensorType tval) 
                => Tensor v'1 ResourceHandle

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 tkey

                keys: Keys of type Tkey.

                -> Tensor v'3 tval

                values: Values of type Tval.

                -> m' ControlNode 

                Table initializer that takes two tensors for keys and values respectively.

                initializeTableV2' Source #

                Arguments

                :: (MonadBuild m', TensorType tkey, TensorType tval) 
                => OpParams 
                -> Tensor v'1 ResourceHandle

                table_handle: Handle to a table which will be initialized.

                -> Tensor v'2 tkey

                keys: Keys of type Tkey.

                -> Tensor v'3 tval

                values: Values of type Tval.

                -> m' ControlNode 

                inv Source #

                Arguments

                :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                => Tensor v'1 t

                x

                -> Tensor Build t

                y

                Computes the reciprocal of x element-wise.

                I.e., \(y = 1 / x\).

                invGrad Source #

                Arguments

                :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                => Tensor v'1 t

                x

                -> Tensor v'2 t

                y

                -> Tensor Build t

                z

                Computes the gradient for the inverse of x wrt its input.

                Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy + is the corresponding input gradient.

                invGrad' Source #

                Arguments

                :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor v'2 t

                y

                -> Tensor Build t

                z

                invert Source #

                Arguments

                :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8] t 
                => Tensor v'1 t

                x

                -> Tensor Build t

                y

                Flips all bits elementwise.

                The result will have exactly those bits set, that are not set in x. The + computation is performed on the underlying representation of x.

                invert' Source #

                Arguments

                :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8] t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor Build t

                y

                invertPermutation Source #

                Arguments

                :: OneOf '[Int32, Int64] t 
                => Tensor v'1 t

                x: 1-D.

                -> Tensor Build t

                y: 1-D.

                Computes the inverse permutation of a tensor.

                This operation computes the inverse of an index permutation. It takes a 1-D integer tensor x, which represents the indices of a zero-based array, and swaps each value with its index position. In other words, for an output tensor - y and an input tensor x, this operation computes the following:

                `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`

                The values must include 0. There can be no duplicate values or negative values.

                For example:

                ```prettyprint + y and an input tensor x, this operation computes the following:

                `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`

                The values must include 0. There can be no duplicate values or negative values.

                For example:

                ``` # tensor x is [3, 4, 0, 2, 1] invert_permutation(x) ==> [2, 4, 3, 0, 1] - ```

                invertPermutation'

                Arguments

                :: OneOf `[Int32, Int64]` t 
                => OpParams 
                -> Tensor v'1 t

                x: 1-D.

                -> Tensor Build t

                y: 1-D.

                isFinite

                Arguments

                :: OneOf `[Word16, Double, Float]` t 
                => Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                Returns which elements of x are finite.

                compatibility(numpy) + ```

                invertPermutation' Source #

                Arguments

                :: OneOf '[Int32, Int64] t 
                => OpParams 
                -> Tensor v'1 t

                x: 1-D.

                -> Tensor Build t

                y: 1-D.

                isFinite Source #

                Arguments

                :: OneOf '[Word16, Double, Float] t 
                => Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                Returns which elements of x are finite.

                compatibility(numpy) Equivalent to np.isfinite - end_compatibility

                isFinite'

                Arguments

                :: OneOf `[Word16, Double, Float]` t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                isInf

                Arguments

                :: OneOf `[Word16, Double, Float]` t 
                => Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                Returns which elements of x are Inf.

                compatibility(numpy) + end_compatibility

                isFinite' Source #

                Arguments

                :: OneOf '[Word16, Double, Float] t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                isInf Source #

                Arguments

                :: OneOf '[Word16, Double, Float] t 
                => Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                Returns which elements of x are Inf.

                compatibility(numpy) Equivalent to np.isinf - end_compatibility

                isInf'

                Arguments

                :: OneOf `[Word16, Double, Float]` t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                isNan

                Arguments

                :: OneOf `[Word16, Double, Float]` t 
                => Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                Returns which elements of x are NaN.

                compatibility(numpy) + end_compatibility

                isInf' Source #

                Arguments

                :: OneOf '[Word16, Double, Float] t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                isNan Source #

                Arguments

                :: OneOf '[Word16, Double, Float] t 
                => Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                Returns which elements of x are NaN.

                compatibility(numpy) Equivalent to np.isnan - end_compatibility

                isNan'

                Arguments

                :: OneOf `[Word16, Double, Float]` t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                isVariableInitialized

                Arguments

                :: (MonadBuild m', TensorType dtype) 
                => Tensor Ref dtype

                ref: Should be from a Variable node. May be uninitialized.

                -> m' (Tensor Value Bool)

                is_initialized

                Checks whether a tensor has been initialized.

                Outputs boolean scalar indicating whether the tensor has been initialized.

                isVariableInitialized'

                Arguments

                :: (MonadBuild m', TensorType dtype) 
                => OpParams 
                -> Tensor Ref dtype

                ref: Should be from a Variable node. May be uninitialized.

                -> m' (Tensor Value Bool)

                is_initialized

                l2Loss

                Arguments

                :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                => Tensor v'1 t

                t: Typically 2-D, but may have any dimensions.

                -> Tensor Build t

                output: 0-D.

                L2 Loss.

                Computes half the L2 norm of a tensor without the sqrt:

                output = sum(t ** 2) / 2

                l2Loss'

                Arguments

                :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                => OpParams 
                -> Tensor v'1 t

                t: Typically 2-D, but may have any dimensions.

                -> Tensor Build t

                output: 0-D.

                lRN

                Arguments

                :: OneOf `[Word16, Float]` t 
                => Tensor v'1 t

                input: 4-D.

                -> Tensor Build t

                output

                Local Response Normalization.

                The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last + end_compatibility

                isNan' Source #

                Arguments

                :: OneOf '[Word16, Double, Float] t 
                => OpParams 
                -> Tensor v'1 t

                x

                -> Tensor Build Bool

                y

                isVariableInitialized Source #

                Arguments

                :: (MonadBuild m', TensorType dtype) 
                => Tensor Ref dtype

                ref: Should be from a Variable node. May be uninitialized.

                -> m' (Tensor Value Bool)

                is_initialized

                Checks whether a tensor has been initialized.

                Outputs boolean scalar indicating whether the tensor has been initialized.

                isVariableInitialized' Source #

                Arguments

                :: (MonadBuild m', TensorType dtype) 
                => OpParams 
                -> Tensor Ref dtype

                ref: Should be from a Variable node. May be uninitialized.

                -> m' (Tensor Value Bool)

                is_initialized

                iterator Source #

                Arguments

                :: MonadBuild m' 
                => [DataType]

                output_types

                -> m' (Tensor Value ResourceHandle)

                handle: A handle to the iterator that can be passed to a MakeIterator + or IteratorGetNext op.

                A container for an iterator resource.

                iterator' Source #

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> [DataType]

                output_types

                -> m' (Tensor Value ResourceHandle)

                handle: A handle to the iterator that can be passed to a MakeIterator + or IteratorGetNext op.

                iteratorDispose Source #

                Arguments

                :: MonadBuild m' 
                => Tensor v'1 ResourceHandle

                iterator

                -> m' ControlNode 

                Releases any resources used by the given iterator.

                iteratorDispose' Source #

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> Tensor v'1 ResourceHandle

                iterator

                -> m' ControlNode 

                iteratorFromStringHandle Source #

                Arguments

                :: MonadBuild m' 
                => Tensor v'1 ByteString

                string_handle: A string representation of the given handle.

                -> m' (Tensor Value ResourceHandle)

                resource_handle: A handle to an iterator resource.

                Converts the given string representing a handle to an iterator to a resource.

                iteratorFromStringHandle' Source #

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> Tensor v'1 ByteString

                string_handle: A string representation of the given handle.

                -> m' (Tensor Value ResourceHandle)

                resource_handle: A handle to an iterator resource.

                iteratorGetNext Source #

                Arguments

                :: (MonadBuild m', TensorTypes output_types) 
                => Tensor v'1 ResourceHandle

                iterator

                -> m' (TensorList Value output_types)

                components

                Gets the next output from the given iterator.

                iteratorGetNext' Source #

                Arguments

                :: (MonadBuild m', TensorTypes output_types) 
                => OpParams 
                -> Tensor v'1 ResourceHandle

                iterator

                -> m' (TensorList Value output_types)

                components

                iteratorToStringHandle Source #

                Arguments

                :: MonadBuild m' 
                => Tensor v'1 ResourceHandle

                resource_handle: A handle to an iterator resource.

                -> m' (Tensor Value ByteString)

                string_handle: A string representation of the given handle.

                Converts the given resource_handle representing an iterator to a string.

                iteratorToStringHandle' Source #

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> Tensor v'1 ResourceHandle

                resource_handle: A handle to an iterator resource.

                -> m' (Tensor Value ByteString)

                string_handle: A string representation of the given handle.

                l2Loss Source #

                Arguments

                :: OneOf '[Word16, Double, Float] t 
                => Tensor v'1 t

                t: Typically 2-D, but may have any dimensions.

                -> Tensor Build t

                output: 0-D.

                L2 Loss.

                Computes half the L2 norm of a tensor without the sqrt:

                output = sum(t ** 2) / 2

                l2Loss' Source #

                Arguments

                :: OneOf '[Word16, Double, Float] t 
                => OpParams 
                -> Tensor v'1 t

                t: Typically 2-D, but may have any dimensions.

                -> Tensor Build t

                output: 0-D.

                lMDBReader Source #

                Arguments

                :: MonadBuild m' 
                => m' (Tensor Ref ByteString)

                reader_handle: The handle to reference the Reader.

                A Reader that outputs the records from a LMDB file.

                lMDBReader' Source #

                Arguments

                :: MonadBuild m' 
                => OpParams 
                -> m' (Tensor Ref ByteString)

                reader_handle: The handle to reference the Reader.

                lRN Source #

                Arguments

                :: OneOf '[Word16, Float] t 
                => Tensor v'1 t

                input: 4-D.

                -> Tensor Build t

                output

                Local Response Normalization.

                The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. Within a given vector, each component is divided by the weighted, squared sum of inputs within depth_radius. In detail,

                sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) ** beta

                For details, see Krizhevsky et al., ImageNet classification with deep - convolutional neural networks (NIPS 2012).

                lRN'

                Arguments

                :: OneOf `[Word16, Float]` t 
                => OpParams 
                -> Tensor v'1 t

                input: 4-D.

                -> Tensor Build t

                output

                lRNGrad

                Arguments

                :: OneOf `[Word16, Float]` t 
                => Tensor v'1 t

                input_grads: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor v'2 t

                input_image: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor v'3 t

                output_image: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor Build t

                output: The gradients for LRN.

                Gradients for Local Response Normalization.

                lRNGrad'

                Arguments

                :: OneOf `[Word16, Float]` t 
                => OpParams 
                -> Tensor v'1 t

                input_grads: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor v'2 t

                input_image: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor v'3 t

                output_image: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor Build t

                output: The gradients for LRN.

                learnedUnigramCandidateSampler

                Arguments

                :: Int64

                num_sampled: Number of candidates to randomly sample per batch.

                -> Int64

                num_true: Number of true labels per context.

                -> Int64

                range_max: The sampler will sample integers from the interval [0, range_max).

                -> Bool

                unique: If unique is true, we sample with rejection, so that all sampled + convolutional neural networks (NIPS 2012).

                lRN' Source #

                Arguments

                :: OneOf '[Word16, Float] t 
                => OpParams 
                -> Tensor v'1 t

                input: 4-D.

                -> Tensor Build t

                output

                lRNGrad Source #

                Arguments

                :: OneOf '[Word16, Float] t 
                => Tensor v'1 t

                input_grads: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor v'2 t

                input_image: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor v'3 t

                output_image: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor Build t

                output: The gradients for LRN.

                Gradients for Local Response Normalization.

                lRNGrad' Source #

                Arguments

                :: OneOf '[Word16, Float] t 
                => OpParams 
                -> Tensor v'1 t

                input_grads: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor v'2 t

                input_image: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor v'3 t

                output_image: 4-D with shape `[batch, height, width, channels]`.

                -> Tensor Build t

                output: The gradients for LRN.

                learnedUnigramCandidateSampler Source #

                Arguments

                :: MonadBuild m' 
                => Int64

                num_sampled: Number of candidates to randomly sample.

                -> Int64

                num_true: Number of true labels per context.

                -> Int64

                range_max: The sampler will sample integers from the interval [0, range_max).

                -> Bool

                unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

                -> Tensor v'1 Int64

                true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

                -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

                (sampled_candidates, true_expected_count, sampled_expected_count)

                • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

                -> Tensor v'1 Int64

                true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

                -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

                (sampled_candidates, true_expected_count, sampled_expected_count)

                • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
                • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
                • sampled_expected_count: A vector of length num_sampled, for each sampled @@ -1401,35 +1579,35 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core go/candidate-sampling.

                  For each batch, this op picks a single set of sampled candidate labels.

                  The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the - true labels.

                  learnedUnigramCandidateSampler'

                  Arguments

                  :: OpParams 
                  -> Int64

                  num_sampled: Number of candidates to randomly sample per batch.

                  -> Int64

                  num_true: Number of true labels per context.

                  -> Int64

                  range_max: The sampler will sample integers from the interval [0, range_max).

                  -> Bool

                  unique: If unique is true, we sample with rejection, so that all sampled + true labels.

                  learnedUnigramCandidateSampler' Source #

                  Arguments

                  :: MonadBuild m' 
                  => OpParams 
                  -> Int64

                  num_sampled: Number of candidates to randomly sample.

                  -> Int64

                  num_true: Number of true labels per context.

                  -> Int64

                  range_max: The sampler will sample integers from the interval [0, range_max).

                  -> Bool

                  unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

                  -> Tensor v'1 Int64

                  true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

                  -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

                  (sampled_candidates, true_expected_count, sampled_expected_count)

                  • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

                  -> Tensor v'1 Int64

                  true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

                  -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

                  (sampled_candidates, true_expected_count, sampled_expected_count)

                  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
                  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
                  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a - probability.

                  less

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  Returns the truth value of (x < y) element-wise.

                  • NOTE*: Less supports broadcasting. More about broadcasting - here

                  less'

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  lessEqual

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  Returns the truth value of (x <= y) element-wise.

                  • NOTE*: LessEqual supports broadcasting. More about broadcasting - here

                  lessEqual'

                  Arguments

                  :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  lgamma

                  Arguments

                  :: OneOf `[Word16, Double, Float]` t 
                  => Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  Computes the log of the absolute value of `Gamma(x)` element-wise.

                  lgamma'

                  Arguments

                  :: OneOf `[Word16, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  linSpace

                  Arguments

                  :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                  => Tensor v'1 t

                  start: First entry in the range.

                  -> Tensor v'2 t

                  stop: Last entry in the range.

                  -> Tensor v'3 tidx

                  num: Number of values to generate.

                  -> Tensor Build t

                  output: 1-D. The generated values.

                  Generates values in an interval.

                  A sequence of num evenly-spaced values are generated beginning at start. + probability.

                  less Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  Returns the truth value of (x < y) element-wise.

                  • NOTE*: Less supports broadcasting. More about broadcasting + here

                  less' Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  lessEqual Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  Returns the truth value of (x <= y) element-wise.

                  • NOTE*: LessEqual supports broadcasting. More about broadcasting + here

                  lessEqual' Source #

                  Arguments

                  :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor v'2 t

                  y

                  -> Tensor Build Bool

                  z

                  lgamma Source #

                  Arguments

                  :: OneOf '[Word16, Double, Float] t 
                  => Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  Computes the log of the absolute value of `Gamma(x)` element-wise.

                  lgamma' Source #

                  Arguments

                  :: OneOf '[Word16, Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  linSpace Source #

                  Arguments

                  :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                  => Tensor v'1 t

                  start: First entry in the range.

                  -> Tensor v'2 t

                  stop: Last entry in the range.

                  -> Tensor v'3 tidx

                  num: Number of values to generate.

                  -> Tensor Build t

                  output: 1-D. The generated values.

                  Generates values in an interval.

                  A sequence of num evenly-spaced values are generated beginning at start. If `num > 1`, the values in the sequence increase by `stop - start / num - 1`, so that the last one is exactly stop.

                  For example:

                  ``` tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] - ```

                  linSpace'

                  Arguments

                  :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                  => OpParams 
                  -> Tensor v'1 t

                  start: First entry in the range.

                  -> Tensor v'2 t

                  stop: Last entry in the range.

                  -> Tensor v'3 tidx

                  num: Number of values to generate.

                  -> Tensor Build t

                  output: 1-D. The generated values.

                  listDiff

                  Arguments

                  :: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
                  => Tensor v'1 t

                  x: 1-D. Values to keep.

                  -> Tensor v'2 t

                  y: 1-D. Values to remove.

                  -> (Tensor Build t, Tensor Build out_idx)

                  (out, idx)

                  • out: 1-D. Values present in x but not in y.
                  • idx: 1-D. Positions of x values preserved in out.

                  Computes the difference between two lists of numbers or strings.

                  Given a list x and a list y, this operation returns a list out that + ```

                  linSpace' Source #

                  Arguments

                  :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                  => OpParams 
                  -> Tensor v'1 t

                  start: First entry in the range.

                  -> Tensor v'2 t

                  stop: Last entry in the range.

                  -> Tensor v'3 tidx

                  num: Number of values to generate.

                  -> Tensor Build t

                  output: 1-D. The generated values.

                  listDiff Source #

                  Arguments

                  :: (TensorType t, OneOf '[Int32, Int64] out_idx) 
                  => Tensor v'1 t

                  x: 1-D. Values to keep.

                  -> Tensor v'2 t

                  y: 1-D. Values to remove.

                  -> (Tensor Build t, Tensor Build out_idx)

                  (out, idx)

                  • out: 1-D. Values present in x but not in y.
                  • idx: 1-D. Positions of x values preserved in out.

                  Computes the difference between two lists of numbers or strings.

                  Given a list x and a list y, this operation returns a list out that represents all values that are in x but not in y. The returned list out is sorted in the same order that the numbers appear in x (duplicates are preserved). This operation also returns a list idx that represents the - position of each out element in x. In other words:

                  `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`

                  For example, given this input:

                  ```prettyprint + position of each out element in x. In other words:

                  `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`

                  For example, given this input:

                  ``` x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] - ```

                  This operation would return:

                  ```prettyprint + ```

                  This operation would return:

                  ``` out ==> [2, 4, 6] idx ==> [1, 3, 5] - ```

                  listDiff'

                  Arguments

                  :: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
                  => OpParams 
                  -> Tensor v'1 t

                  x: 1-D. Values to keep.

                  -> Tensor v'2 t

                  y: 1-D. Values to remove.

                  -> (Tensor Build t, Tensor Build out_idx)

                  (out, idx)

                  • out: 1-D. Values present in x but not in y.
                  • idx: 1-D. Positions of x values preserved in out.

                  log

                  Arguments

                  :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                  => Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  Computes natural logarithm of x element-wise.

                  I.e., \(y = log_e x\).

                  log'

                  Arguments

                  :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  log1p

                  Arguments

                  :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                  => Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  Computes natural logarithm of (1 + x) element-wise.

                  I.e., \(y = log_e (1 + x)\).

                  log1p'

                  Arguments

                  :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  logSoftmax

                  Arguments

                  :: OneOf `[Word16, Double, Float]` t 
                  => Tensor v'1 t

                  logits: 2-D with shape `[batch_size, num_classes]`.

                  -> Tensor Build t

                  logsoftmax: Same shape as logits.

                  Computes log softmax activations.

                  For each batch i and class j we have

                  logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))

                  logSoftmax'

                  Arguments

                  :: OneOf `[Word16, Double, Float]` t 
                  => OpParams 
                  -> Tensor v'1 t

                  logits: 2-D with shape `[batch_size, num_classes]`.

                  -> Tensor Build t

                  logsoftmax: Same shape as logits.

                  logUniformCandidateSampler

                  Arguments

                  :: Int64

                  num_sampled: Number of candidates to randomly sample per batch.

                  -> Int64

                  num_true: Number of true labels per context.

                  -> Int64

                  range_max: The sampler will sample integers from the interval [0, range_max).

                  -> Bool

                  unique: If unique is true, we sample with rejection, so that all sampled + ```

                  listDiff' Source #

                  Arguments

                  :: (TensorType t, OneOf '[Int32, Int64] out_idx) 
                  => OpParams 
                  -> Tensor v'1 t

                  x: 1-D. Values to keep.

                  -> Tensor v'2 t

                  y: 1-D. Values to remove.

                  -> (Tensor Build t, Tensor Build out_idx)

                  (out, idx)

                  • out: 1-D. Values present in x but not in y.
                  • idx: 1-D. Positions of x values preserved in out.

                  log Source #

                  Arguments

                  :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                  => Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  Computes natural logarithm of x element-wise.

                  I.e., \(y = log_e x\).

                  log1p Source #

                  Arguments

                  :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                  => Tensor v'1 t

                  x

                  -> Tensor Build t

                  y

                  Computes natural logarithm of (1 + x) element-wise.

                  I.e., \(y = log_e (1 + x)\).

                  logSoftmax Source #

                  Arguments

                  :: OneOf '[Word16, Double, Float] t 
                  => Tensor v'1 t

                  logits: 2-D with shape `[batch_size, num_classes]`.

                  -> Tensor Build t

                  logsoftmax: Same shape as logits.

                  Computes log softmax activations.

                  For each batch i and class j we have

                  logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))

                  logSoftmax' Source #

                  Arguments

                  :: OneOf '[Word16, Double, Float] t 
                  => OpParams 
                  -> Tensor v'1 t

                  logits: 2-D with shape `[batch_size, num_classes]`.

                  -> Tensor Build t

                  logsoftmax: Same shape as logits.

                  logUniformCandidateSampler Source #

                  Arguments

                  :: MonadBuild m' 
                  => Int64

                  num_sampled: Number of candidates to randomly sample.

                  -> Int64

                  num_true: Number of true labels per context.

                  -> Int64

                  range_max: The sampler will sample integers from the interval [0, range_max).

                  -> Bool

                  unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

                  -> Tensor v'1 Int64

                  true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

                  -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

                  (sampled_candidates, true_expected_count, sampled_expected_count)

                  • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

                  -> Tensor v'1 Int64

                  true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

                  -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

                  (sampled_candidates, true_expected_count, sampled_expected_count)

                  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
                  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
                  • sampled_expected_count: A vector of length num_sampled, for each sampled @@ -1439,35 +1617,47 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core go/candidate-sampling.

                    For each batch, this op picks a single set of sampled candidate labels.

                    The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the - true labels.

                    logUniformCandidateSampler'

                    Arguments

                    :: OpParams 
                    -> Int64

                    num_sampled: Number of candidates to randomly sample per batch.

                    -> Int64

                    num_true: Number of true labels per context.

                    -> Int64

                    range_max: The sampler will sample integers from the interval [0, range_max).

                    -> Bool

                    unique: If unique is true, we sample with rejection, so that all sampled + true labels.

                    logUniformCandidateSampler' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Int64

                    num_sampled: Number of candidates to randomly sample.

                    -> Int64

                    num_true: Number of true labels per context.

                    -> Int64

                    range_max: The sampler will sample integers from the interval [0, range_max).

                    -> Bool

                    unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

                    -> Tensor v'1 Int64

                    true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

                    -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

                    (sampled_candidates, true_expected_count, sampled_expected_count)

                    • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

                    -> Tensor v'1 Int64

                    true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

                    -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

                    (sampled_candidates, true_expected_count, sampled_expected_count)

                    • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
                    • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
                    • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a - probability.

                    logicalAnd

                    Arguments

                    :: Tensor v'1 Bool

                    x

                    -> Tensor v'2 Bool

                    y

                    -> Tensor Build Bool

                    z

                    Returns the truth value of x AND y element-wise.

                    • NOTE*: LogicalAnd supports broadcasting. More about broadcasting - here

                    logicalAnd'

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Bool

                    x

                    -> Tensor v'2 Bool

                    y

                    -> Tensor Build Bool

                    z

                    logicalNot

                    Arguments

                    :: Tensor v'1 Bool

                    x

                    -> Tensor Build Bool

                    y

                    Returns the truth value of NOT x element-wise.

                    logicalOr

                    Arguments

                    :: Tensor v'1 Bool

                    x

                    -> Tensor v'2 Bool

                    y

                    -> Tensor Build Bool

                    z

                    Returns the truth value of x OR y element-wise.

                    • NOTE*: LogicalOr supports broadcasting. More about broadcasting - here

                    logicalOr'

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Bool

                    x

                    -> Tensor v'2 Bool

                    y

                    -> Tensor Build Bool

                    z

                    lookupTableExport

                    Arguments

                    :: (MonadBuild m', TensorType tkeys, TensorType tvalues) 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> m' (Tensor Value tkeys, Tensor Value tvalues)

                    (keys, values)

                    • keys: Vector of all keys present in the table.
                    • values: Tensor of all values in the table. Indexed in parallel with keys.

                    Outputs all keys and values in the table.

                    lookupTableExport'

                    Arguments

                    :: (MonadBuild m', TensorType tkeys, TensorType tvalues) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> m' (Tensor Value tkeys, Tensor Value tvalues)

                    (keys, values)

                    • keys: Vector of all keys present in the table.
                    • values: Tensor of all values in the table. Indexed in parallel with keys.

                    lookupTableFind

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    default_value

                    -> m' (Tensor Value tout)

                    values: Same shape as keys. Values found in the table, or default_values + probability.

                    logicalAnd Source #

                    Arguments

                    :: Tensor v'1 Bool

                    x

                    -> Tensor v'2 Bool

                    y

                    -> Tensor Build Bool

                    z

                    Returns the truth value of x AND y element-wise.

                    • NOTE*: LogicalAnd supports broadcasting. More about broadcasting + here

                    logicalNot Source #

                    Arguments

                    :: Tensor v'1 Bool

                    x

                    -> Tensor Build Bool

                    y

                    Returns the truth value of NOT x element-wise.

                    logicalOr Source #

                    Arguments

                    :: Tensor v'1 Bool

                    x

                    -> Tensor v'2 Bool

                    y

                    -> Tensor Build Bool

                    z

                    Returns the truth value of x OR y element-wise.

                    • NOTE*: LogicalOr supports broadcasting. More about broadcasting here

                    logicalOr' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Bool

                    x

                    -> Tensor v'2 Bool

                    y

                    -> Tensor Build Bool

                    z

                    lookupTableExport Source #

                    Arguments

                    :: (MonadBuild m', TensorType tkeys, TensorType tvalues) 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> m' (Tensor Value tkeys, Tensor Value tvalues)

                    (keys, values)

                    • keys: Vector of all keys present in the table.
                    • values: Tensor of all values in the table. Indexed in parallel with keys.

                    Outputs all keys and values in the table.

                    lookupTableExport' Source #

                    Arguments

                    :: (MonadBuild m', TensorType tkeys, TensorType tvalues) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> m' (Tensor Value tkeys, Tensor Value tvalues)

                    (keys, values)

                    • keys: Vector of all keys present in the table.
                    • values: Tensor of all values in the table. Indexed in parallel with keys.

                    lookupTableExportV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorType tkeys, TensorType tvalues) 
                    => Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> m' (Tensor Value tkeys, Tensor Value tvalues)

                    (keys, values)

                    • keys: Vector of all keys present in the table.
                    • values: Tensor of all values in the table. Indexed in parallel with keys.

                    Outputs all keys and values in the table.

                    lookupTableExportV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorType tkeys, TensorType tvalues) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> m' (Tensor Value tkeys, Tensor Value tvalues)

                    (keys, values)

                    • keys: Vector of all keys present in the table.
                    • values: Tensor of all values in the table. Indexed in parallel with keys.

                    lookupTableFind Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    default_value

                    -> m' (Tensor Value tout)

                    values: Same shape as keys. Values found in the table, or default_values for missing keys.

                    Looks up keys in a table, outputs the corresponding values.

                    The tensor keys must be of the same type as the keys of the table. The output values is of the type of the table values.

                    The scalar default_value is the value output for keys not present in the table. It must also be of the same type as the table values.

                    lookupTableFind'

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    default_value

                    -> m' (Tensor Value tout)

                    values: Same shape as keys. Values found in the table, or default_values for missing keys.

                    lookupTableImport

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    Replaces the contents of the table with the specified keys and values.

                    The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

                    lookupTableImport'

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    lookupTableInsert

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    Updates the table to associates keys with values.

                    The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

                    lookupTableInsert'

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    lookupTableSize

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> m' (Tensor Value Int64)

                    size: Scalar that contains number of elements in the table.

                    Computes the number of elements in the given table.

                    lookupTableSize'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> m' (Tensor Value Int64)

                    size: Scalar that contains number of elements in the table.

                    loopCond

                    Arguments

                    :: Tensor v'1 Bool

                    input: A boolean scalar, representing the branch predicate of the Switch op.

                    -> Tensor Build Bool

                    output: The same tensor as input.

                    Forwards the input to the output.

                    This operator represents the loop termination condition used by the "pivot" switches of a loop.

                    loopCond'

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Bool

                    input: A boolean scalar, representing the branch predicate of the Switch op.

                    -> Tensor Build Bool

                    output: The same tensor as input.

                    matMul

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    a

                    -> Tensor v'2 t

                    b

                    -> Tensor Build t

                    product

                    Multiply the matrix "a" by the matrix "b".

                    The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transposed_b is true).

                    The scalar default_value is the value output for keys not present in the table. It must also be of the same type as the table values.

                    lookupTableFind' Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    default_value

                    -> m' (Tensor Value tout)

                    values: Same shape as keys. Values found in the table, or default_values for missing keys.

                    lookupTableFindV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    default_value

                    -> m' (Tensor Value tout)

                    values: Same shape as keys. Values found in the table, or default_values for missing keys.

                    Looks up keys in a table, outputs the corresponding values.

                    The tensor keys must be of the same type as the keys of the table. The output values is of the type of the table values.

                    The scalar default_value is the value output for keys not present in the table. It must also be of the same type as the table values.

                    lookupTableFindV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    default_value

                    -> m' (Tensor Value tout)

                    values: Same shape as keys. Values found in the table, or default_values for missing keys.

                    lookupTableImport Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    Replaces the contents of the table with the specified keys and values.

                    The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

                    lookupTableImport' Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    lookupTableImportV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    Replaces the contents of the table with the specified keys and values.

                    The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

                    lookupTableImportV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    lookupTableInsert Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    Updates the table to associates keys with values.

                    The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

                    lookupTableInsert' Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    lookupTableInsertV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    Updates the table to associates keys with values.

                    The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

                    lookupTableInsertV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorType tin, TensorType tout) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> Tensor v'2 tin

                    keys: Any shape. Keys to look up.

                    -> Tensor v'3 tout

                    values: Values to associate with keys.

                    -> m' ControlNode 

                    lookupTableSize Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> m' (Tensor Value Int64)

                    size: Scalar that contains number of elements in the table.

                    Computes the number of elements in the given table.

                    lookupTableSize' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    table_handle: Handle to the table.

                    -> m' (Tensor Value Int64)

                    size: Scalar that contains number of elements in the table.

                    lookupTableSizeV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> m' (Tensor Value Int64)

                    size: Scalar that contains number of elements in the table.

                    Computes the number of elements in the given table.

                    lookupTableSizeV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    table_handle: Handle to the table.

                    -> m' (Tensor Value Int64)

                    size: Scalar that contains number of elements in the table.

                    loopCond Source #

                    Arguments

                    :: Tensor v'1 Bool

                    input: A boolean scalar, representing the branch predicate of the Switch op.

                    -> Tensor Build Bool

                    output: The same tensor as input.

                    Forwards the input to the output.

                    This operator represents the loop termination condition used by the "pivot" switches of a loop.

                    loopCond' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Bool

                    input: A boolean scalar, representing the branch predicate of the Switch op.

                    -> Tensor Build Bool

                    output: The same tensor as input.

                    makeIterator Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    dataset

                    -> Tensor v'2 ResourceHandle

                    iterator

                    -> m' ControlNode 

                    Makes a new iterator from the given dataset and stores it in iterator.

                    This operation may be executed multiple times. Each execution will reset the iterator in iterator to the first element of dataset.

                    makeIterator' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    dataset

                    -> Tensor v'2 ResourceHandle

                    iterator

                    -> m' ControlNode 

                    mapClear Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    dtypes

                    -> m' ControlNode 

                    Op removes all elements in the underlying container.

                    mapClear' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    dtypes

                    -> m' ControlNode 

                    mapIncompleteSize Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    dtypes

                    -> m' (Tensor Value Int32)

                    size

                    Op returns the number of incomplete elements in the underlying container.

                    mapIncompleteSize' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    dtypes

                    -> m' (Tensor Value Int32)

                    size

                    mapPeek Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => Tensor v'1 Int64

                    key

                    -> Tensor v'2 Int32

                    indices

                    -> m' (TensorList Value dtypes)

                    values

                    Op peeks at the values at the specified key. If the

                    underlying container does not contain this key, this op will block until it does.

                    mapPeek' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => OpParams 
                    -> Tensor v'1 Int64

                    key

                    -> Tensor v'2 Int32

                    indices

                    -> m' (TensorList Value dtypes)

                    values

                    mapSize Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    dtypes

                    -> m' (Tensor Value Int32)

                    size

                    Op returns the number of elements in the underlying container.

                    mapSize' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    dtypes

                    -> m' (Tensor Value Int32)

                    size

                    mapStage Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes fake_dtypes) 
                    => [DataType]

                    dtypes

                    -> Tensor v'1 Int64

                    key: int64

                    -> Tensor v'2 Int32

                    indices

                    -> TensorList v'3 fake_dtypes

                    values: a list of tensors. dtypes: A list of data types that inserted values should adhere to.

                    -> m' ControlNode 

                    Stage (key, values) in the underlying container which behaves like a hashtable.

                    mapStage' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes fake_dtypes) 
                    => OpParams 
                    -> [DataType]

                    dtypes

                    -> Tensor v'1 Int64

                    key: int64

                    -> Tensor v'2 Int32

                    indices

                    -> TensorList v'3 fake_dtypes

                    values: a list of tensors. dtypes: A list of data types that inserted values should adhere to.

                    -> m' ControlNode 

                    mapUnstage Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => Tensor v'1 Int64

                    key

                    -> Tensor v'2 Int32

                    indices

                    -> m' (TensorList Value dtypes)

                    values

                    Op removes and returns the values associated with the key

                    from the underlying container. If the underlying container does not contain this key, the op will block until it does.

                    mapUnstage' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => OpParams 
                    -> Tensor v'1 Int64

                    key

                    -> Tensor v'2 Int32

                    indices

                    -> m' (TensorList Value dtypes)

                    values

                    mapUnstageNoKey Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => Tensor v'1 Int32

                    indices

                    -> m' (Tensor Value Int64, TensorList Value dtypes)

                    (key, values)

                    • key
                    • values

                    Op removes and returns a random (key, value)

                    from the underlying container. If the underlying container does not contain elements, the op will block until it does.

                    mapUnstageNoKey' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => OpParams 
                    -> Tensor v'1 Int32

                    indices

                    -> m' (Tensor Value Int64, TensorList Value dtypes)

                    (key, values)

                    • key
                    • values

                    matMul Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t 
                    => Tensor v'1 t

                    a

                    -> Tensor v'2 t

                    b

                    -> Tensor Build t

                    product

                    Multiply the matrix "a" by the matrix "b".

                    The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transposed_b is true).

                    • Note*: The default kernel implementation for MatMul on GPUs uses cublas.

                    matMul'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    a

                    -> Tensor v'2 t

                    b

                    -> Tensor Build t

                    product

                    matchingFiles

                    Arguments

                    :: Tensor v'1 ByteString

                    pattern: A (scalar) shell wildcard pattern.

                    -> Tensor Build ByteString

                    filenames: A vector of matching filenames.

                    Returns the set of files matching a pattern.

                    Note that this routine only supports wildcard characters in the basename portion of the pattern, not in the directory portion.

                    matchingFiles'

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 ByteString

                    pattern: A (scalar) shell wildcard pattern.

                    -> Tensor Build ByteString

                    filenames: A vector of matching filenames.

                    matrixBandPart

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input: Rank k tensor.

                    -> Tensor v'2 Int64

                    num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle.

                    -> Tensor v'3 Int64

                    num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle.

                    -> Tensor Build t

                    band: Rank k tensor of the same shape as input. The extracted banded tensor.

                    Copy a tensor setting everything outside a central band in each innermost matrix

                    to zero.

                    The band part is computed as follows:

                    • Note*: The default kernel implementation for MatMul on GPUs uses cublas.

                    matMul' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    a

                    -> Tensor v'2 t

                    b

                    -> Tensor Build t

                    product

                    matchingFiles Source #

                    Arguments

                    :: Tensor v'1 ByteString

                    pattern: Shell wildcard pattern(s). Scalar or vector of type string.

                    -> Tensor Build ByteString

                    filenames: A vector of matching filenames.

                    Returns the set of files matching one or more glob patterns.

                    Note that this routine only supports wildcard characters in the basename portion of the pattern, not in the directory portion.

                    matchingFiles' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 ByteString

                    pattern: Shell wildcard pattern(s). Scalar or vector of type string.

                    -> Tensor Build ByteString

                    filenames: A vector of matching filenames.

                    matrixBandPart Source #

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input: Rank k tensor.

                    -> Tensor v'2 Int64

                    num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle.

                    -> Tensor v'3 Int64

                    num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle.

                    -> Tensor Build t

                    band: Rank k tensor of the same shape as input. The extracted banded tensor.

                    Copy a tensor setting everything outside a central band in each innermost matrix

                    to zero.

                    The band part is computed as follows: Assume input has k dimensions `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where

                    `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.

                    The indicator function

                    `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= num_upper)`.

                    For example:

                    ```prettyprint
                    `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= num_upper)`.

                    For example:

                    ``` # if input is [[ 0, 1, 2, 3] [-1, 0, 1, 2] [-2, -1, 0, 1] [-1, 0, 1, 0] [-2, -1, 0, 1] [ 0, -2, -1, 0]] ```

                    Useful special cases:

                    ```prettyprint + ```

                    Useful special cases:

                    ``` tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. tf.matrix_band_part(input, 0, 0) ==> Diagonal. ```

                    matrixBandPart'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Rank k tensor.

                    -> Tensor v'2 Int64

                    num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire - lower triangle.

                    -> Tensor v'3 Int64

                    num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep - entire upper triangle.

                    -> Tensor Build t

                    band: Rank k tensor of the same shape as input. The extracted banded tensor.

                    matrixDeterminant

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => Tensor v'1 t

                    input: Shape is `[..., M, M]`.

                    -> Tensor Build t

                    output: Shape is `[...]`.

                    Computes the determinant of one ore more square matrices.

                    The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + ```

                    matrixBandPart' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Rank k tensor.

                    -> Tensor v'2 Int64

                    num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire + lower triangle.

                    -> Tensor v'3 Int64

                    num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep + entire upper triangle.

                    -> Tensor Build t

                    band: Rank k tensor of the same shape as input. The extracted banded tensor.

                    matrixDeterminant Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => Tensor v'1 t

                    input: Shape is `[..., M, M]`.

                    -> Tensor Build t

                    output: Shape is `[...]`.

                    Computes the determinant of one ore more square matrices.

                    The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor containing the determinants - for all input submatrices `[..., :, :]`.

                    matrixDeterminant'

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Shape is `[..., M, M]`.

                    -> Tensor Build t

                    output: Shape is `[...]`.

                    matrixDiag

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    diagonal: Rank k, where `k >= 1`.

                    -> Tensor Build t

                    output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.

                    Returns a batched diagonal tensor with a given batched diagonal values.

                    Given a diagonal, this operation returns a tensor with the diagonal and + for all input submatrices `[..., :, :]`.

                    matrixDeterminant' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Shape is `[..., M, M]`.

                    -> Tensor Build t

                    output: Shape is `[...]`.

                    matrixDiag Source #

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    diagonal: Rank k, where `k >= 1`.

                    -> Tensor Build t

                    output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.

                    Returns a batched diagonal tensor with a given batched diagonal values.

                    Given a diagonal, this operation returns a tensor with the diagonal and everything else padded with zeros. The diagonal is computed as follows:

                    Assume diagonal has k dimensions `[I, J, K, ..., N]`, then the output is a - tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:

                    `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.

                    For example:

                    ```prettyprint + tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:

                    `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.

                    For example:

                    ``` # diagonal is [[1, 2, 3, 4], [5, 6, 7, 8]]

                    and diagonal.shape = (2, 4)

                    tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] @@ -1497,10 +1687,10 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, 8]]]

                    which has shape (2, 4, 4) - ```

                    matrixDiag'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    diagonal: Rank k, where `k >= 1`.

                    -> Tensor Build t

                    output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.

                    matrixDiagPart

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input: Rank k tensor where `k >= 2`.

                    -> Tensor Build t

                    diagonal: The extracted diagonal(s) having shape + ```

                    matrixDiag' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    diagonal: Rank k, where `k >= 1`.

                    -> Tensor Build t

                    output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.

                    matrixDiagPart Source #

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input: Rank k tensor where `k >= 2`.

                    -> Tensor Build t

                    diagonal: The extracted diagonal(s) having shape `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.

                    Returns the batched diagonal part of a batched tensor.

                    This operation returns a tensor with the diagonal part of the batched input. The diagonal part is computed as follows:

                    Assume input has k dimensions `[I, J, K, ..., M, N]`, then the output is a - tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:

                    `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.

                    The input must be at least a matrix.

                    For example:

                    ```prettyprint + tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:

                    `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.

                    The input must be at least a matrix.

                    For example:

                    ``` # input is [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] @@ -1509,34 +1699,34 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, 8]]]

                    and input.shape = (2, 4, 4)

                    tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]

                    which has shape (2, 4) - ```

                    matrixDiagPart'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Rank k tensor where `k >= 2`.

                    -> Tensor Build t

                    diagonal: The extracted diagonal(s) having shape - `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.

                    matrixInverse

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => Tensor v'1 t

                    input: Shape is `[..., M, M]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, M]`.

                    compatibility(numpy) + ```

                    matrixDiagPart' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Rank k tensor where `k >= 2`.

                    -> Tensor Build t

                    diagonal: The extracted diagonal(s) having shape + `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.

                    matrixInverse Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => Tensor v'1 t

                    input: Shape is `[..., M, M]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, M]`.

                    compatibility(numpy) Equivalent to np.linalg.inv end_compatibility

                    Computes the inverse of one or more square invertible matrices or their

                    adjoints (conjugate transposes).

                    The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the inverse for all input submatrices `[..., :, :]`.

                    The op uses LU decomposition with partial pivoting to compute the inverses.

                    If a matrix is not invertible there is no guarantee what the op does. It may detect the condition and raise an exception or it may simply return a - garbage result.

                    matrixInverse'

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Shape is `[..., M, M]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, M]`.

                    compatibility(numpy) + garbage result.

                    matrixInverse' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Shape is `[..., M, M]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, M]`.

                    compatibility(numpy) Equivalent to np.linalg.inv - end_compatibility

                    matrixSetDiag

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input: Rank `k+1`, where `k >= 1`.

                    -> Tensor v'2 t

                    diagonal: Rank k, where `k >= 1`.

                    -> Tensor Build t

                    output: Rank `k+1`, with `output.shape = input.shape`.

                    Returns a batched matrix tensor with new batched diagonal values.

                    Given input and diagonal, this operation returns a tensor with the + end_compatibility

                    matrixSetDiag Source #

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input: Rank `k+1`, where `k >= 1`.

                    -> Tensor v'2 t

                    diagonal: Rank k, where `k >= 1`.

                    -> Tensor Build t

                    output: Rank `k+1`, with `output.shape = input.shape`.

                    Returns a batched matrix tensor with new batched diagonal values.

                    Given input and diagonal, this operation returns a tensor with the same shape and values as input, except for the main diagonal of the innermost matrices. These will be overwritten by the values in diagonal.

                    The output is computed as follows:

                    Assume input has `k+1` dimensions `[I, J, K, ..., M, N]` and diagonal has k dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a - tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:

                    • `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
                    • `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.

                    matrixSetDiag'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Rank `k+1`, where `k >= 1`.

                    -> Tensor v'2 t

                    diagonal: Rank k, where `k >= 1`.

                    -> Tensor Build t

                    output: Rank `k+1`, with `output.shape = input.shape`.

                    matrixSolve

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Double, Float]` t 
                    => Tensor v'1 t

                    matrix: Shape is `[..., M, M]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, K]`.

                    Solves systems of linear equations.

                    Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:

                    • `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
                    • `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.

                    matrixSetDiag' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Rank `k+1`, where `k >= 1`.

                    -> Tensor v'2 t

                    diagonal: Rank k, where `k >= 1`.

                    -> Tensor Build t

                    output: Rank `k+1`, with `output.shape = input.shape`.

                    matrixSolve Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => Tensor v'1 t

                    matrix: Shape is `[..., M, M]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, K]`.

                    Solves systems of linear equations.

                    Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. Rhs is a tensor of shape `[..., M, K]`. The output is - a tensor shape `[..., M, K]`. If adjoint is False then each output matrix + a tensor shape `[..., M, K]`. If adjoint is False then each output matrix satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. - If adjoint is True then each output matrix satisfies - `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.

                    matrixSolve'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    matrix: Shape is `[..., M, M]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, K]`.

                    matrixSolveLs

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => Tensor v'1 t

                    matrix: Shape is `[..., M, N]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor v'3 Double

                    l2_regularizer: Scalar tensor.

                    compatibility(numpy) + If adjoint is True then each output matrix satisfies + `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.

                    matrixSolve' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    matrix: Shape is `[..., M, M]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, K]`.

                    matrixSolveLs Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => Tensor v'1 t

                    matrix: Shape is `[..., M, N]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor v'3 Double

                    l2_regularizer: Scalar tensor.

                    compatibility(numpy) Equivalent to np.linalg.lstsq - end_compatibility

                    -> Tensor Build t

                    output: Shape is `[..., N, K]`.

                    Solves one or more linear least-squares problems.

                    matrix is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions + end_compatibility

                    -> Tensor Build t

                    output: Shape is `[..., N, K]`.

                    Solves one or more linear least-squares problems.

                    matrix is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`. The output is a tensor shape `[..., N, K]` where each output matrix solves each of the equations matrix[..., :, :] * output[..., :, :] = rhs[..., :, :] in the least squares sense.

                    matrix and right-hand sides in the batch:

                    matrix=\(A in Re^{m times n}\), rhs=\(B in Re^{m times k}\), output=\(X in Re^{n times k}\), - l2_regularizer=\(lambda\).

                    If fast is True, then the solution is computed by solving the normal + l2_regularizer=\(lambda\).

                    If fast is True, then the solution is computed by solving the normal equations using Cholesky decomposition. Specifically, if \(m ge n\) then \(X = (A^T A + lambda I)^{-1} A^T B\), which solves the least-squares problem \(X = mathrm{argmin}_{Z in Re^{n times k} } ||A Z - B||_F^2 + @@ -1547,59 +1737,71 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core \(A Z = B\). Notice that the fast path is only numerically stable when \(A\) is numerically full rank and has a condition number \(mathrm{cond}(A) lt frac{1}{sqrt{epsilon_{mach} } }\) or\(lambda\) is - sufficiently large.

                    If fast is False an algorithm based on the numerically robust complete + sufficiently large.

                    If fast is False an algorithm based on the numerically robust complete orthogonal decomposition is used. This computes the minimum-norm least-squares solution, even when \(A\) is rank deficient. This path is - typically 6-7 times slower than the fast path. If fast is False then - l2_regularizer is ignored.

                    matrixSolveLs'

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    matrix: Shape is `[..., M, N]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor v'3 Double

                    l2_regularizer: Scalar tensor.

                    compatibility(numpy) + typically 6-7 times slower than the fast path. If fast is False then + l2_regularizer is ignored.

                    matrixSolveLs' Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    matrix: Shape is `[..., M, N]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor v'3 Double

                    l2_regularizer: Scalar tensor.

                    compatibility(numpy) Equivalent to np.linalg.lstsq - end_compatibility

                    -> Tensor Build t

                    output: Shape is `[..., N, K]`.

                    matrixTriangularSolve

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => Tensor v'1 t

                    matrix: Shape is `[..., M, M]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, K]`.

                    Solves systems of linear equations with upper or lower triangular matrices by

                    backsubstitution.

                    matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form - square matrices. If lower is True then the strictly upper triangular part + end_compatibility

                    -> Tensor Build t

                    output: Shape is `[..., N, K]`.

                    matrixTriangularSolve Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => Tensor v'1 t

                    matrix: Shape is `[..., M, M]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, K]`.

                    Solves systems of linear equations with upper or lower triangular matrices by

                    backsubstitution.

                    matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + square matrices. If lower is True then the strictly upper triangular part of each inner-most matrix is assumed to be zero and not accessed. If lower is False then the strictly lower triangular part of each inner-most matrix is assumed to be zero and not accessed. rhs is a tensor of shape `[..., M, K]`.

                    The output is a tensor of shape `[..., M, K]`. If adjoint is - True then the innermost matrices in output` satisfy matrix equations + True then the innermost matrices in output` satisfy matrix equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. - If adjoint is False then the strictly then the innermost matrices in + If adjoint is False then the strictly then the innermost matrices in output satisfy matrix equations - `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.

                    matrixTriangularSolve'

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    matrix: Shape is `[..., M, M]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, K]`.

                    max

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                    => Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    Computes the maximum of elements across dimensions of a tensor.

                    Reduces input along the dimensions given in reduction_indices. Unless + `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.

                    matrixTriangularSolve' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    matrix: Shape is `[..., M, M]`.

                    -> Tensor v'2 t

                    rhs: Shape is `[..., M, K]`.

                    -> Tensor Build t

                    output: Shape is `[..., M, K]`.

                    max Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                    => Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    Computes the maximum of elements across dimensions of a tensor.

                    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

                    max'

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    maxPool

                    Arguments

                    :: OneOf `[Word16, Float]` t 
                    => Tensor v'1 t

                    input: 4-D input to pool over.

                    -> Tensor Build t

                    output: The max pooled output tensor.

                    Performs max pooling on the input.

                    maxPool'

                    Arguments

                    :: OneOf `[Word16, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: 4-D input to pool over.

                    -> Tensor Build t

                    output: The max pooled output tensor.

                    maxPool3D

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

                    -> Tensor Build t

                    output: The max pooled output tensor.

                    Performs 3D max pooling on the input.

                    maxPool3D'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

                    -> Tensor Build t

                    output: The max pooled output tensor.

                    maxPool3DGrad

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 Float

                    orig_input: The original input tensor.

                    -> Tensor v'2 Float

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

                    -> Tensor Build t

                    output

                    Computes gradients of max pooling function.

                    maxPool3DGrad'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 Float

                    orig_input: The original input tensor.

                    -> Tensor v'2 Float

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

                    -> Tensor Build t

                    output

                    maxPoolGrad

                    Arguments

                    :: OneOf `[Word16, Float]` t 
                    => Tensor v'1 t

                    orig_input: The original input tensor.

                    -> Tensor v'2 t

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: 4-D. Gradients w.r.t. the output of max_pool.

                    -> Tensor Build t

                    output: Gradients w.r.t. the input to max_pool.

                    Computes gradients of the maxpooling function.

                    maxPoolGrad'

                    Arguments

                    :: OneOf `[Word16, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    orig_input: The original input tensor.

                    -> Tensor v'2 t

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: 4-D. Gradients w.r.t. the output of max_pool.

                    -> Tensor Build t

                    output: Gradients w.r.t. the input to max_pool.

                    maxPoolGradWithArgmax

                    Arguments

                    :: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) 
                    => Tensor v'1 t

                    input: The original input.

                    -> Tensor v'2 t

                    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the - output of max_pool.

                    -> Tensor v'3 targmax

                    argmax: The indices of the maximum values chosen for each output of max_pool.

                    -> Tensor Build t

                    output: Gradients w.r.t. the input of max_pool.

                    Computes gradients of the maxpooling function.

                    maxPoolGradWithArgmax'

                    Arguments

                    :: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The original input.

                    -> Tensor v'2 t

                    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the - output of max_pool.

                    -> Tensor v'3 targmax

                    argmax: The indices of the maximum values chosen for each output of max_pool.

                    -> Tensor Build t

                    output: Gradients w.r.t. the input of max_pool.

                    maxPoolWithArgmax

                    Arguments

                    :: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) 
                    => Tensor v'1 t

                    input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.

                    -> (Tensor Build t, Tensor Build targmax)

                    (output, argmax)

                    • output: The max pooled output tensor.
                    • argmax: 4-D. The flattened indices of the max values chosen for each output.

                    Performs max pooling on the input and outputs both max values and indices.

                    The indices in argmax are flattened, so that a maximum value at position + retained with length 1.

                    max' Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    maxPool Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    input: 4-D input to pool over.

                    -> Tensor Build t

                    output: The max pooled output tensor.

                    Performs max pooling on the input.

                    maxPool' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: 4-D input to pool over.

                    -> Tensor Build t

                    output: The max pooled output tensor.

                    maxPool3D Source #

                    Arguments

                    :: OneOf '[Float] t 
                    => Tensor v'1 t

                    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

                    -> Tensor Build t

                    output: The max pooled output tensor.

                    Performs 3D max pooling on the input.

                    maxPool3D' Source #

                    Arguments

                    :: OneOf '[Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

                    -> Tensor Build t

                    output: The max pooled output tensor.

                    maxPool3DGrad Source #

                    Arguments

                    :: (OneOf '[Float] t, OneOf '[Float] tInput) 
                    => Tensor v'1 tInput

                    orig_input: The original input tensor.

                    -> Tensor v'2 tInput

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

                    -> Tensor Build t

                    output

                    Computes gradients of max pooling function.

                    maxPool3DGrad' Source #

                    Arguments

                    :: (OneOf '[Float] t, OneOf '[Float] tInput) 
                    => OpParams 
                    -> Tensor v'1 tInput

                    orig_input: The original input tensor.

                    -> Tensor v'2 tInput

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

                    -> Tensor Build t

                    output

                    maxPool3DGradGrad Source #

                    Arguments

                    :: OneOf '[Float] t 
                    => Tensor v'1 t

                    orig_input: The original input tensor.

                    -> Tensor v'2 t

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

                    -> Tensor Build t

                    output: Gradients of gradients w.r.t. the input to max_pool.

                    Computes second-order gradients of the maxpooling function.

                    maxPool3DGradGrad' Source #

                    Arguments

                    :: OneOf '[Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    orig_input: The original input tensor.

                    -> Tensor v'2 t

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

                    -> Tensor Build t

                    output: Gradients of gradients w.r.t. the input to max_pool.

                    maxPoolGrad Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    orig_input: The original input tensor.

                    -> Tensor v'2 t

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: 4-D. Gradients w.r.t. the output of max_pool.

                    -> Tensor Build t

                    output: Gradients w.r.t. the input to max_pool.

                    Computes gradients of the maxpooling function.

                    maxPoolGrad' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    orig_input: The original input tensor.

                    -> Tensor v'2 t

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: 4-D. Gradients w.r.t. the output of max_pool.

                    -> Tensor Build t

                    output: Gradients w.r.t. the input to max_pool.

                    maxPoolGradGrad Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    orig_input: The original input tensor.

                    -> Tensor v'2 t

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: 4-D. Gradients of gradients w.r.t. the input of max_pool.

                    -> Tensor Build t

                    output: Gradients of gradients w.r.t. the input to max_pool.

                    Computes second-order gradients of the maxpooling function.

                    maxPoolGradGrad' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    orig_input: The original input tensor.

                    -> Tensor v'2 t

                    orig_output: The original output tensor.

                    -> Tensor v'3 t

                    grad: 4-D. Gradients of gradients w.r.t. the input of max_pool.

                    -> Tensor Build t

                    output: Gradients of gradients w.r.t. the input to max_pool.

                    maxPoolGradGradWithArgmax Source #

                    Arguments

                    :: (OneOf '[Int32, Int64] targmax, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 t

                    input: The original input.

                    -> Tensor v'2 t

                    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + input of max_pool.

                    -> Tensor v'3 targmax

                    argmax: The indices of the maximum values chosen for each output of max_pool.

                    -> Tensor Build t

                    output: Gradients of gradients w.r.t. the input of max_pool.

                    Computes second-order gradients of the maxpooling function.

                    maxPoolGradGradWithArgmax' Source #

                    Arguments

                    :: (OneOf '[Int32, Int64] targmax, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The original input.

                    -> Tensor v'2 t

                    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + input of max_pool.

                    -> Tensor v'3 targmax

                    argmax: The indices of the maximum values chosen for each output of max_pool.

                    -> Tensor Build t

                    output: Gradients of gradients w.r.t. the input of max_pool.

                    maxPoolGradWithArgmax Source #

                    Arguments

                    :: (OneOf '[Int32, Int64] targmax, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 t

                    input: The original input.

                    -> Tensor v'2 t

                    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + output of max_pool.

                    -> Tensor v'3 targmax

                    argmax: The indices of the maximum values chosen for each output of max_pool.

                    -> Tensor Build t

                    output: Gradients w.r.t. the input of max_pool.

                    Computes gradients of the maxpooling function.

                    maxPoolGradWithArgmax' Source #

                    Arguments

                    :: (OneOf '[Int32, Int64] targmax, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The original input.

                    -> Tensor v'2 t

                    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + output of max_pool.

                    -> Tensor v'3 targmax

                    argmax: The indices of the maximum values chosen for each output of max_pool.

                    -> Tensor Build t

                    output: Gradients w.r.t. the input of max_pool.

                    maxPoolWithArgmax Source #

                    Arguments

                    :: (OneOf '[Int32, Int64] targmax, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 t

                    input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.

                    -> (Tensor Build t, Tensor Build targmax)

                    (output, argmax)

                    • output: The max pooled output tensor.
                    • argmax: 4-D. The flattened indices of the max values chosen for each output.

                    Performs max pooling on the input and outputs both max values and indices.

                    The indices in argmax are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index - `((b * height + y) * width + x) * channels + c`.

                    maxPoolWithArgmax'

                    Arguments

                    :: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.

                    -> (Tensor Build t, Tensor Build targmax)

                    (output, argmax)

                    • output: The max pooled output tensor.
                    • argmax: 4-D. The flattened indices of the max values chosen for each output.

                    maximum

                    Arguments

                    :: OneOf `[Int32, Int64, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns the max of x and y (i.e. x > y ? x : y) element-wise.

                    • NOTE*: Maximum supports broadcasting. More about broadcasting - here

                    maximum'

                    Arguments

                    :: OneOf `[Int32, Int64, Word16, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    mean

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                    => Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    Computes the mean of elements across dimensions of a tensor.

                    Reduces input along the dimensions given in reduction_indices. Unless + `((b * height + y) * width + x) * channels + c`.

                    The indices returned are always in `[0, height) x [0, width)` before flattening, + even if padding is involved and the mathematically correct answer is outside + (either negative or too large). This is a bug, but fixing it is difficult to do + in a safe backwards compatible way, especially due to flattening.

                    maxPoolWithArgmax' Source #

                    Arguments

                    :: (OneOf '[Int32, Int64] targmax, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.

                    -> (Tensor Build t, Tensor Build targmax)

                    (output, argmax)

                    • output: The max pooled output tensor.
                    • argmax: 4-D. The flattened indices of the max values chosen for each output.

                    maximum Source #

                    Arguments

                    :: OneOf '[Int32, Int64, Word16, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns the max of x and y (i.e. x > y ? x : y) element-wise.

                    • NOTE*: Maximum supports broadcasting. More about broadcasting + here

                    maximum' Source #

                    Arguments

                    :: OneOf '[Int32, Int64, Word16, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    mean Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                    => Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    Computes the mean of elements across dimensions of a tensor.

                    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

                    mean'

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    merge

                    Arguments

                    :: TensorType t 
                    => [Tensor v'1 t]

                    inputs: The input tensors, exactly one of which will become available.

                    -> (Tensor Build t, Tensor Build Int32)

                    (output, value_index)

                    • output: Will be set to the available input tensor.
                    • value_index: The index of the chosen input tensor in inputs.

                    Forwards the value of an available tensor from inputs to output.

                    Merge waits for at least one of the tensors in inputs to become available. - It is usually combined with Switch to implement branching.

                    Merge forwards the first tensor for become available to output, and sets - value_index to its index in inputs.

                    merge'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> [Tensor v'1 t]

                    inputs: The input tensors, exactly one of which will become available.

                    -> (Tensor Build t, Tensor Build Int32)

                    (output, value_index)

                    • output: Will be set to the available input tensor.
                    • value_index: The index of the chosen input tensor in inputs.

                    mergeSummary

                    Arguments

                    :: [Tensor v'1 ByteString]

                    inputs: Can be of any shape. Each must contain serialized Summary protocol - buffers.

                    -> Tensor Build ByteString

                    summary: Scalar. Serialized Summary protocol buffer.

                    Merges summaries.

                    This op creates a + retained with length 1.

                    mean' Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    merge Source #

                    Arguments

                    :: TensorType t 
                    => [Tensor v'1 t]

                    inputs: The input tensors, exactly one of which will become available.

                    -> (Tensor Build t, Tensor Build Int32)

                    (output, value_index)

                    • output: Will be set to the available input tensor.
                    • value_index: The index of the chosen input tensor in inputs.

                    Forwards the value of an available tensor from inputs to output.

                    Merge waits for at least one of the tensors in inputs to become available. + It is usually combined with Switch to implement branching.

                    Merge forwards the first tensor to become available to output, and sets + value_index to its index in inputs.

                    merge' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> [Tensor v'1 t]

                    inputs: The input tensors, exactly one of which will become available.

                    -> (Tensor Build t, Tensor Build Int32)

                    (output, value_index)

                    • output: Will be set to the available input tensor.
                    • value_index: The index of the chosen input tensor in inputs.

                    mergeSummary Source #

                    Arguments

                    :: [Tensor v'1 ByteString]

                    inputs: Can be of any shape. Each must contain serialized Summary protocol + buffers.

                    -> Tensor Build ByteString

                    summary: Scalar. Serialized Summary protocol buffer.

                    Merges summaries.

                    This op creates a `Summary` protocol buffer that contains the union of all the values in the input summaries.

                    When the Op is run, it reports an InvalidArgument error if multiple values - in the summaries to merge use the same tag.

                    mergeSummary'

                    Arguments

                    :: OpParams 
                    -> [Tensor v'1 ByteString]

                    inputs: Can be of any shape. Each must contain serialized Summary protocol - buffers.

                    -> Tensor Build ByteString

                    summary: Scalar. Serialized Summary protocol buffer.

                    mergeV2Checkpoints

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ByteString

                    checkpoint_prefixes: prefixes of V2 checkpoints to merge.

                    -> Tensor v'2 ByteString

                    destination_prefix: scalar. The desired final prefix. Allowed to be the same - as one of the checkpoint_prefixes.

                    -> m' ControlNode 

                    V2 format specific: merges the metadata files of sharded checkpoints. The

                    result is one logical checkpoint, with one physical metadata file and renamed + in the summaries to merge use the same tag.

                    mergeSummary' Source #

                    Arguments

                    :: OpParams 
                    -> [Tensor v'1 ByteString]

                    inputs: Can be of any shape. Each must contain serialized Summary protocol + buffers.

                    -> Tensor Build ByteString

                    summary: Scalar. Serialized Summary protocol buffer.

                    mergeV2Checkpoints Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ByteString

                    checkpoint_prefixes: prefixes of V2 checkpoints to merge.

                    -> Tensor v'2 ByteString

                    destination_prefix: scalar. The desired final prefix. Allowed to be the same + as one of the checkpoint_prefixes.

                    -> m' ControlNode 

                    V2 format specific: merges the metadata files of sharded checkpoints. The

                    result is one logical checkpoint, with one physical metadata file and renamed data files.

                    Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.

                    If delete_old_dirs is true, attempts to delete recursively the dirname of each path in the input checkpoint_prefixes. This is useful when those paths are non - user-facing temporary locations.

                    mergeV2Checkpoints'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    checkpoint_prefixes: prefixes of V2 checkpoints to merge.

                    -> Tensor v'2 ByteString

                    destination_prefix: scalar. The desired final prefix. Allowed to be the same - as one of the checkpoint_prefixes.

                    -> m' ControlNode 

                    min

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                    => Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    Computes the minimum of elements across dimensions of a tensor.

                    Reduces input along the dimensions given in reduction_indices. Unless + user-facing temporary locations.

                    mergeV2Checkpoints' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    checkpoint_prefixes: prefixes of V2 checkpoints to merge.

                    -> Tensor v'2 ByteString

                    destination_prefix: scalar. The desired final prefix. Allowed to be the same + as one of the checkpoint_prefixes.

                    -> m' ControlNode 

                    mfcc Source #

                    Arguments

                    :: Tensor v'1 Float

                    spectrogram: Typically produced by the Spectrogram op, with magnitude_squared + set to true.

                    -> Tensor v'2 Int32

                    sample_rate: How many samples per second the source audio used.

                    -> Tensor Build Float

                    output

                    Transforms a spectrogram into a form that's useful for speech recognition.

                    Mel Frequency Cepstral Coefficients are a way of representing audio data that's + been effective as an input feature for machine learning. They are created by + taking the spectrum of a spectrogram (a cepstrum), and discarding some of the + higher frequencies that are less significant to the human ear. They have a long + history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum + is a good resource to learn more.

                    mfcc' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Float

                    spectrogram: Typically produced by the Spectrogram op, with magnitude_squared + set to true.

                    -> Tensor v'2 Int32

                    sample_rate: How many samples per second the source audio used.

                    -> Tensor Build Float

                    output

                    min Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                    => Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    Computes the minimum of elements across dimensions of a tensor.

                    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

                    min'

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    minimum

                    Arguments

                    :: OneOf `[Int32, Int64, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

                    • NOTE*: Minimum supports broadcasting. More about broadcasting - here

                    minimum'

                    Arguments

                    :: OneOf `[Int32, Int64, Word16, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    mirrorPad

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
                    => Tensor v'1 t

                    input: The input tensor to be padded.

                    -> Tensor v'2 tpaddings

                    paddings: A two-column matrix specifying the padding sizes. The number of - rows must be the same as the rank of input.

                    -> Tensor Build t

                    output: The padded tensor.

                    Pads a tensor with mirrored values.

                    This operation pads a input with mirrored values according to the paddings + retained with length 1.

                    min' Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    minimum Source #

                    Arguments

                    :: OneOf '[Int32, Int64, Word16, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

                    • NOTE*: Minimum supports broadcasting. More about broadcasting + here

                    minimum' Source #

                    Arguments

                    :: OneOf '[Int32, Int64, Word16, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    mirrorPad Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                    => Tensor v'1 t

                    input: The input tensor to be padded.

                    -> Tensor v'2 tpaddings

                    paddings: A two-column matrix specifying the padding sizes. The number of + rows must be the same as the rank of input.

                    -> Tensor Build t

                    output: The padded tensor.

                    Pads a tensor with mirrored values.

                    This operation pads a input with mirrored values according to the paddings you specify. paddings is an integer tensor with shape `[n, 2]`, where n is the rank of input. For each dimension D of input, `paddings[D, 0]` indicates how many values to add before the contents of input in that dimension, and `paddings[D, 1]` indicates how many values to add after the contents of input in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if copy_border is true - (if false, respectively).

                    The padded size of each dimension D of the output is:

                    `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

                    For example:

                    ```prettyprint + (if false, respectively).

                    The padded size of each dimension D of the output is:

                    `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

                    For example:

                    ``` # t is [[1, 2, 3], [4, 5, 6]]. # paddings is [[1, 1]], [2, 2]]. # mode is SYMMETRIC. @@ -1608,35 +1810,68 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [2, 1, 1, 2, 3, 3, 2] [5, 4, 4, 5, 6, 6, 5] [5, 4, 4, 5, 6, 6, 5]] - ```

                    mirrorPad'

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The input tensor to be padded.

                    -> Tensor v'2 tpaddings

                    paddings: A two-column matrix specifying the padding sizes. The number of - rows must be the same as the rank of input.

                    -> Tensor Build t

                    output: The padded tensor.

                    mirrorPadGrad

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
                    => Tensor v'1 t

                    input: The input tensor to be folded.

                    -> Tensor v'2 tpaddings

                    paddings: A two-column matrix specifying the padding sizes. The number of - rows must be the same as the rank of input.

                    -> Tensor Build t

                    output: The folded tensor.

                    Gradient op for MirrorPad op. This op folds a mirror-padded tensor.

                    This operation folds the padded areas of input by MirrorPad according to the + ```

                    mirrorPad' Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The input tensor to be padded.

                    -> Tensor v'2 tpaddings

                    paddings: A two-column matrix specifying the padding sizes. The number of + rows must be the same as the rank of input.

                    -> Tensor Build t

                    output: The padded tensor.

                    mirrorPadGrad Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                    => Tensor v'1 t

                    input: The input tensor to be folded.

                    -> Tensor v'2 tpaddings

                    paddings: A two-column matrix specifying the padding sizes. The number of + rows must be the same as the rank of input.

                    -> Tensor Build t

                    output: The folded tensor.

                    Gradient op for MirrorPad op. This op folds a mirror-padded tensor.

                    This operation folds the padded areas of input by MirrorPad according to the paddings you specify. paddings must be the same as paddings argument - given to the corresponding MirrorPad op.

                    The folded size of each dimension D of the output is:

                    `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`

                    For example:

                    ```prettyprint + given to the corresponding MirrorPad op.

                    The folded size of each dimension D of the output is:

                    `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`

                    For example:

                    ``` # t is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. # paddings is [[0, 1]], [0, 1]]. # mode is SYMMETRIC. # rank of t is 2. pad(t, paddings) ==> [[ 1, 5] [11, 28]] - ```

                    mirrorPadGrad'

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The input tensor to be folded.

                    -> Tensor v'2 tpaddings

                    paddings: A two-column matrix specifying the padding sizes. The number of - rows must be the same as the rank of input.

                    -> Tensor Build t

                    output: The folded tensor.

                    mod

                    Arguments

                    :: OneOf `[Int32, Int64, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns element-wise remainder of division.

                    • NOTE*: Mod supports broadcasting. More about broadcasting - here

                    mod'

                    Arguments

                    :: OneOf `[Int32, Int64, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    mul

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns x * y element-wise.

                    • NOTE*: Mul supports broadcasting. More about broadcasting - here

                    mul'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    multinomial

                    Arguments

                    :: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => Tensor v'1 t

                    logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` - represents the unnormalized log probabilities for all classes.

                    -> Tensor v'2 Int32

                    num_samples: 0-D. Number of independent samples to draw for each row slice.

                    -> m' (Tensor Value Int64)

                    output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` - contains the drawn class labels with range `[0, num_classes)`.

                    Draws samples from a multinomial distribution.

                    multinomial'

                    Arguments

                    :: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` - represents the unnormalized log probabilities for all classes.

                    -> Tensor v'2 Int32

                    num_samples: 0-D. Number of independent samples to draw for each row slice.

                    -> m' (Tensor Value Int64)

                    output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` - contains the drawn class labels with range `[0, num_classes)`.

                    mutableDenseHashTable

                    Arguments

                    :: (MonadBuild m', TensorType key_dtype) 
                    => DataType

                    value_dtype: Type of the table values.

                    -> Tensor v'1 key_dtype

                    empty_key: The key used to represent empty key buckets internally. Must not - be used in insert or lookup operations.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    Creates an empty hash table that uses tensors as the backing store. It uses

                    "open addressing" with quadratic reprobing to resolve collisions.

                    This op creates a mutable hash table, specifying the type of its keys and + ```

                    mirrorPadGrad' Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The input tensor to be folded.

                    -> Tensor v'2 tpaddings

                    paddings: A two-column matrix specifying the padding sizes. The number of + rows must be the same as the rank of input.

                    -> Tensor Build t

                    output: The folded tensor.

                    mod Source #

                    Arguments

                    :: OneOf '[Int32, Int64, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns element-wise remainder of division. This emulates C semantics in that

                    the result here is consistent with a truncating divide. E.g. `truncate(x / y) * + y + truncate_mod(x, y) = x`.

                    • NOTE*: Mod supports broadcasting. More about broadcasting + here

                    mod' Source #

                    Arguments

                    :: OneOf '[Int32, Int64, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    mul Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns x * y element-wise.

                    • NOTE*: Mul supports broadcasting. More about broadcasting + here

                    multinomial Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 t

                    logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + represents the unnormalized log probabilities for all classes.

                    -> Tensor v'2 Int32

                    num_samples: 0-D. Number of independent samples to draw for each row slice.

                    -> m' (Tensor Value Int64)

                    output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` + contains the drawn class labels with range `[0, num_classes)`.

                    Draws samples from a multinomial distribution.

                    multinomial' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + represents the unnormalized log probabilities for all classes.

                    -> Tensor v'2 Int32

                    num_samples: 0-D. Number of independent samples to draw for each row slice.

                    -> m' (Tensor Value Int64)

                    output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` + contains the drawn class labels with range `[0, num_classes)`.

                    mutableDenseHashTable Source #

                    Arguments

                    :: (MonadBuild m', TensorType key_dtype) 
                    => DataType

                    value_dtype: Type of the table values.

                    -> Tensor v'1 key_dtype

                    empty_key: The key used to represent empty key buckets internally. Must not + be used in insert or lookup operations.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    Creates an empty hash table that uses tensors as the backing store.

                    It uses "open addressing" with quadratic reprobing to resolve + collisions.

                    This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a scalar. Data can be inserted into the table using - the insert operations. It does not support the initialization operation.

                    mutableDenseHashTable'

                    Arguments

                    :: (MonadBuild m', TensorType key_dtype) 
                    => OpParams 
                    -> DataType

                    value_dtype: Type of the table values.

                    -> Tensor v'1 key_dtype

                    empty_key: The key used to represent empty key buckets internally. Must not - be used in insert or lookup operations.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    mutableHashTable

                    Arguments

                    :: MonadBuild m' 
                    => DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    Creates an empty hash table.

                    This op creates a mutable hash table, specifying the type of its keys and + the insert operations. It does not support the initialization operation.

                    mutableDenseHashTable' Source #

                    Arguments

                    :: (MonadBuild m', TensorType key_dtype) 
                    => OpParams 
                    -> DataType

                    value_dtype: Type of the table values.

                    -> Tensor v'1 key_dtype

                    empty_key: The key used to represent empty key buckets internally. Must not + be used in insert or lookup operations.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    mutableDenseHashTableV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorType key_dtype) 
                    => DataType

                    value_dtype: Type of the table values.

                    -> Tensor v'1 key_dtype

                    empty_key: The key used to represent empty key buckets internally. Must not + be used in insert or lookup operations.

                    -> m' (Tensor Value ResourceHandle)

                    table_handle: Handle to a table.

                    Creates an empty hash table that uses tensors as the backing store.

                    It uses "open addressing" with quadratic reprobing to resolve + collisions.

                    This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a scalar. Data can be inserted into the table using - the insert operations. It does not support the initialization operation.

                    mutableHashTable'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    mutableHashTableOfTensors

                    Arguments

                    :: MonadBuild m' 
                    => DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    Creates an empty hash table.

                    This op creates a mutable hash table, specifying the type of its keys and + the insert operations. It does not support the initialization operation.

                    mutableDenseHashTableV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorType key_dtype) 
                    => OpParams 
                    -> DataType

                    value_dtype: Type of the table values.

                    -> Tensor v'1 key_dtype

                    empty_key: The key used to represent empty key buckets internally. Must not + be used in insert or lookup operations.

                    -> m' (Tensor Value ResourceHandle)

                    table_handle: Handle to a table.

                    mutableHashTable Source #

                    Arguments

                    :: MonadBuild m' 
                    => DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    Creates an empty hash table.

                    This op creates a mutable hash table, specifying the type of its keys and + values. Each value must be a scalar. Data can be inserted into the table using + the insert operations. It does not support the initialization operation.

                    mutableHashTable' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    mutableHashTableOfTensors Source #

                    Arguments

                    :: MonadBuild m' 
                    => DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    Creates an empty hash table.

                    This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a vector. Data can be inserted into the table using - the insert operations. It does not support the initialization operation.

                    mutableHashTableOfTensors'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    neg

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    Computes numerical negative value element-wise.

                    I.e., \(y = -x\).

                    negTrain

                    Arguments

                    :: MonadBuild m' 
                    => Int64

                    num_negative_samples: Number of negative samples per example.

                    -> Tensor Ref Float

                    w_in: input word embedding.

                    -> Tensor Ref Float

                    w_out: output word embedding.

                    -> Tensor v'3 Int32

                    examples: A vector of word ids.

                    -> Tensor v'4 Int32

                    labels: A vector of word ids.

                    -> Tensor v'5 Float

                    lr

                    -> m' ControlNode 

                    Training via negative sampling.

                    negTrain'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Int64

                    num_negative_samples: Number of negative samples per example.

                    -> Tensor Ref Float

                    w_in: input word embedding.

                    -> Tensor Ref Float

                    w_out: output word embedding.

                    -> Tensor v'3 Int32

                    examples: A vector of word ids.

                    -> Tensor v'4 Int32

                    labels: A vector of word ids.

                    -> Tensor v'5 Float

                    lr

                    -> m' ControlNode 

                    nextIteration

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    data: The tensor to be made available to the next iteration.

                    -> Tensor Build t

                    output: The same tensor as `data`.

                    Makes its input available to the next iteration.

                    nextIteration'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    data: The tensor to be made available to the next iteration.

                    -> Tensor Build t

                    output: The same tensor as `data`.

                    noOp :: forall m'. MonadBuild m' => m' ControlNode

                    Does nothing. Only useful as a placeholder for control edges.

                    noOp' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode

                    nonMaxSuppression

                    Arguments

                    :: Tensor v'1 Float

                    boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

                    -> Tensor v'2 Float

                    scores: A 1-D float tensor of shape `[num_boxes]` representing a single - score corresponding to each box (each row of boxes).

                    -> Tensor v'3 Int32

                    max_output_size: A scalar integer tensor representing the maximum number of - boxes to be selected by non max suppression.

                    -> Tensor Build Int32

                    selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + the insert operations. It does not support the initialization operation.

                    mutableHashTableOfTensors' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Ref ByteString)

                    table_handle: Handle to a table.

                    mutableHashTableOfTensorsV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Value ResourceHandle)

                    table_handle: Handle to a table.

                    Creates an empty hash table.

                    This op creates a mutable hash table, specifying the type of its keys and + values. Each value must be a vector. Data can be inserted into the table using + the insert operations. It does not support the initialization operation.

                    mutableHashTableOfTensorsV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Value ResourceHandle)

                    table_handle: Handle to a table.

                    mutableHashTableV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Value ResourceHandle)

                    table_handle: Handle to a table.

                    Creates an empty hash table.

                    This op creates a mutable hash table, specifying the type of its keys and + values. Each value must be a scalar. Data can be inserted into the table using + the insert operations. It does not support the initialization operation.

                    mutableHashTableV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> DataType

                    key_dtype: Type of the table keys.

                    -> DataType

                    value_dtype: Type of the table values.

                    -> m' (Tensor Value ResourceHandle)

                    table_handle: Handle to a table.

                    neg Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    Computes numerical negative value element-wise.

                    I.e., \(y = -x\).

                    negTrain Source #

                    Arguments

                    :: MonadBuild m' 
                    => Int64

                    num_negative_samples: Number of negative samples per example.

                    -> Tensor Ref Float

                    w_in: input word embedding.

                    -> Tensor Ref Float

                    w_out: output word embedding.

                    -> Tensor v'3 Int32

                    examples: A vector of word ids.

                    -> Tensor v'4 Int32

                    labels: A vector of word ids.

                    -> Tensor v'5 Float

                    lr

                    -> m' ControlNode 

                    Training via negative sampling.

                    negTrain' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Int64

                    num_negative_samples: Number of negative samples per example.

                    -> Tensor Ref Float

                    w_in: input word embedding.

                    -> Tensor Ref Float

                    w_out: output word embedding.

                    -> Tensor v'3 Int32

                    examples: A vector of word ids.

                    -> Tensor v'4 Int32

                    labels: A vector of word ids.

                    -> Tensor v'5 Float

                    lr

                    -> m' ControlNode 

                    nextIteration Source #

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    data: The tensor to be made available to the next iteration.

                    -> Tensor Build t

                    output: The same tensor as `data`.

                    Makes its input available to the next iteration.

                    nextIteration' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    data: The tensor to be made available to the next iteration.

                    -> Tensor Build t

                    output: The same tensor as `data`.

                    noOp :: forall m'. MonadBuild m' => m' ControlNode Source #

                    Does nothing. Only useful as a placeholder for control edges.

                    noOp' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode Source #

                    nonMaxSuppression Source #

                    Arguments

                    :: Tensor v'1 Float

                    boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

                    -> Tensor v'2 Float

                    scores: A 1-D float tensor of shape `[num_boxes]` representing a single + score corresponding to each box (each row of boxes).

                    -> Tensor v'3 Int32

                    max_output_size: A scalar integer tensor representing the maximum number of + boxes to be selected by non max suppression.

                    -> Tensor Build Int32

                    selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + indices from the boxes tensor, where `M <= max_output_size`.

                    Greedily selects a subset of bounding boxes in descending order of score,

                    pruning away boxes that have high intersection-over-union (IOU) overlap + with previously selected boxes. Bounding boxes are supplied as + [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + diagonal pair of box corners and the coordinates can be provided as normalized + (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + is agnostic to where the origin is in the coordinate system. Note that this + algorithm is invariant to orthogonal transformations and translations + of the coordinate system; thus translating or reflections of the coordinate + system result in the same boxes being selected by the algorithm. + The output of this operation is a set of integers indexing into the input + collection of bounding boxes representing the selected boxes. The bounding + box coordinates corresponding to the selected indices can then be obtained + using the `tf.gather operation`. For example: + selected_indices = tf.image.non_max_suppression( + boxes, scores, max_output_size, iou_threshold) + selected_boxes = tf.gather(boxes, selected_indices)

                    nonMaxSuppression' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Float

                    boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

                    -> Tensor v'2 Float

                    scores: A 1-D float tensor of shape `[num_boxes]` representing a single + score corresponding to each box (each row of boxes).

                    -> Tensor v'3 Int32

                    max_output_size: A scalar integer tensor representing the maximum number of + boxes to be selected by non max suppression.

                    -> Tensor Build Int32

                    selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + indices from the boxes tensor, where `M <= max_output_size`.

                    nonMaxSuppressionV2 Source #

                    Arguments

                    :: Tensor v'1 Float

                    boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

                    -> Tensor v'2 Float

                    scores: A 1-D float tensor of shape `[num_boxes]` representing a single + score corresponding to each box (each row of boxes).

                    -> Tensor v'3 Int32

                    max_output_size: A scalar integer tensor representing the maximum number of + boxes to be selected by non max suppression.

                    -> Tensor v'4 Float

                    iou_threshold: A 0-D float tensor representing the threshold for deciding whether + boxes overlap too much with respect to IOU.

                    -> Tensor Build Int32

                    selected_indices: A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`.

                    Greedily selects a subset of bounding boxes in descending order of score,

                    pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any @@ -1648,13 +1883,14 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core system result in the same boxes being selected by the algorithm.

                    The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained - using the `tf.gather operation`. For example:

                    selected_indices = tf.image.non_max_suppression( + using the `tf.gather operation`. For example:

                    selected_indices = tf.image.non_max_suppression_v2( boxes, scores, max_output_size, iou_threshold) - selected_boxes = tf.gather(boxes, selected_indices)

                    nonMaxSuppression'

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Float

                    boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

                    -> Tensor v'2 Float

                    scores: A 1-D float tensor of shape `[num_boxes]` representing a single - score corresponding to each box (each row of boxes).

                    -> Tensor v'3 Int32

                    max_output_size: A scalar integer tensor representing the maximum number of - boxes to be selected by non max suppression.

                    -> Tensor Build Int32

                    selected_indices: A 1-D integer tensor of shape `[M]` representing the selected - indices from the boxes tensor, where `M <= max_output_size`.

                    notEqual

                    Returns the truth value of (x != y) element-wise.

                    • NOTE*: NotEqual supports broadcasting. More about broadcasting - here

                    oneHot

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64, Word8]` tI) 
                    => Tensor v'1 tI

                    indices: A tensor of indices.

                    -> Tensor v'2 Int32

                    depth: A scalar defining the depth of the one hot dimension.

                    -> Tensor v'3 t

                    on_value: A scalar defining the value to fill in output when `indices[j] = i`.

                    -> Tensor v'4 t

                    off_value: A scalar defining the value to fill in output when `indices[j] != i`.

                    -> Tensor Build t

                    output: The one-hot tensor.

                    Returns a one-hot tensor.

                    The locations represented by indices in indices take value on_value, + selected_boxes = tf.gather(boxes, selected_indices)

                    nonMaxSuppressionV2' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Float

                    boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

                    -> Tensor v'2 Float

                    scores: A 1-D float tensor of shape `[num_boxes]` representing a single + score corresponding to each box (each row of boxes).

                    -> Tensor v'3 Int32

                    max_output_size: A scalar integer tensor representing the maximum number of + boxes to be selected by non max suppression.

                    -> Tensor v'4 Float

                    iou_threshold: A 0-D float tensor representing the threshold for deciding whether + boxes overlap too much with respect to IOU.

                    -> Tensor Build Int32

                    selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + indices from the boxes tensor, where `M <= max_output_size`.

                    notEqual Source #

                    Returns the truth value of (x != y) element-wise.

                    • NOTE*: NotEqual supports broadcasting. More about broadcasting + here

                    oneHot Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) 
                    => Tensor v'1 tI

                    indices: A tensor of indices.

                    -> Tensor v'2 Int32

                    depth: A scalar defining the depth of the one hot dimension.

                    -> Tensor v'3 t

                    on_value: A scalar defining the value to fill in output when `indices[j] = i`.

                    -> Tensor v'4 t

                    off_value: A scalar defining the value to fill in output when `indices[j] != i`.

                    -> Tensor Build t

                    output: The one-hot tensor.

                    Returns a one-hot tensor.

                    The locations represented by indices in indices take value on_value, while all other locations take value off_value.

                    If the input indices is rank N, the output will have rank `N+1`, The new axis is created at dimension axis (default: the new axis is appended at the end).

                    If indices is a scalar the output shape will be a vector of length depth.

                    If indices is a vector of length features, the output shape will be: @@ -1708,22 +1944,28 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // one_hot(-1) - ]```

                    oneHot'

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64, Word8]` tI) 
                    => OpParams 
                    -> Tensor v'1 tI

                    indices: A tensor of indices.

                    -> Tensor v'2 Int32

                    depth: A scalar defining the depth of the one hot dimension.

                    -> Tensor v'3 t

                    on_value: A scalar defining the value to fill in output when `indices[j] = i`.

                    -> Tensor v'4 t

                    off_value: A scalar defining the value to fill in output when `indices[j] != i`.

                    -> Tensor Build t

                    output: The one-hot tensor.

                    pack

                    Arguments

                    :: TensorType t 
                    => [Tensor v'1 t]

                    values: Must be of same shape and type.

                    -> Tensor Build t

                    output: The packed tensor.

                    Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

                    Packs the N tensors in values into a tensor with rank one higher than each + ]```

                    oneHot' Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) 
                    => OpParams 
                    -> Tensor v'1 tI

                    indices: A tensor of indices.

                    -> Tensor v'2 Int32

                    depth: A scalar defining the depth of the one hot dimension.

                    -> Tensor v'3 t

                    on_value: A scalar defining the value to fill in output when `indices[j] = i`.

                    -> Tensor v'4 t

                    off_value: A scalar defining the value to fill in output when `indices[j] != i`.

                    -> Tensor Build t

                    output: The one-hot tensor.

                    onesLike Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t 
                    => Tensor v'1 t

                    x: a tensor of type T.

                    -> Tensor Build t

                    y: a tensor of the same shape and type as x but filled with ones.

                    Returns a tensor of ones with the same shape and type as x.

                    onesLike' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    x: a tensor of type T.

                    -> Tensor Build t

                    y: a tensor of the same shape and type as x but filled with ones.

                    orderedMapClear Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    dtypes

                    -> m' ControlNode 

                    Op removes all elements in the underlying container.

                    orderedMapClear' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    dtypes

                    -> m' ControlNode 

                    orderedMapIncompleteSize Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    dtypes

                    -> m' (Tensor Value Int32)

                    size

                    Op returns the number of incomplete elements in the underlying container.

                    orderedMapPeek Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => Tensor v'1 Int64

                    key

                    -> Tensor v'2 Int32

                    indices

                    -> m' (TensorList Value dtypes)

                    values

                    Op peeks at the values at the specified key. If the

                    underlying container does not contain this key + this op will block until it does. This Op is optimized for + performance.

                    orderedMapPeek' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => OpParams 
                    -> Tensor v'1 Int64

                    key

                    -> Tensor v'2 Int32

                    indices

                    -> m' (TensorList Value dtypes)

                    values

                    orderedMapSize Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    dtypes

                    -> m' (Tensor Value Int32)

                    size

                    Op returns the number of elements in the underlying container.

                    orderedMapSize' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    dtypes

                    -> m' (Tensor Value Int32)

                    size

                    orderedMapStage Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes fake_dtypes) 
                    => [DataType]

                    dtypes

                    -> Tensor v'1 Int64

                    key: int64

                    -> Tensor v'2 Int32

                    indices

                    -> TensorList v'3 fake_dtypes

                    values: a list of tensors + dtypes A list of data types that inserted values should adhere to.

                    -> m' ControlNode 

                    Stage (key, values) in the underlying container which behaves like a ordered

                    associative container. Elements are ordered by key.

                    orderedMapStage' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes fake_dtypes) 
                    => OpParams 
                    -> [DataType]

                    dtypes

                    -> Tensor v'1 Int64

                    key: int64

                    -> Tensor v'2 Int32

                    indices

                    -> TensorList v'3 fake_dtypes

                    values: a list of tensors + dtypes A list of data types that inserted values should adhere to.

                    -> m' ControlNode 

                    orderedMapUnstage Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => Tensor v'1 Int64

                    key

                    -> Tensor v'2 Int32

                    indices

                    -> m' (TensorList Value dtypes)

                    values

                    Op removes and returns the values associated with the key

                    from the underlying container. If the underlying container + does not contain this key, the op will block until it does.

                    orderedMapUnstage' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => OpParams 
                    -> Tensor v'1 Int64

                    key

                    -> Tensor v'2 Int32

                    indices

                    -> m' (TensorList Value dtypes)

                    values

                    orderedMapUnstageNoKey Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => Tensor v'1 Int32

                    indices

                    -> m' (Tensor Value Int64, TensorList Value dtypes)

                    (key, values)

                    • key
                    • values

                    Op removes and returns the (key, value) element with the smallest

                    key from the underlying container. If the underlying container + does not contain elements, the op will block until it does.

                    orderedMapUnstageNoKey' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => OpParams 
                    -> Tensor v'1 Int32

                    indices

                    -> m' (Tensor Value Int64, TensorList Value dtypes)

                    (key, values)

                    • key
                    • values

                    pack Source #

                    Arguments

                    :: TensorType t 
                    => [Tensor v'1 t]

                    values: Must be of same shape and type.

                    -> Tensor Build t

                    output: The packed tensor.

                    Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

                    Packs the N tensors in values into a tensor with rank one higher than each tensor in values, by packing them along the axis dimension. Given a list of tensors of shape `(A, B, C)`;

                    if `axis == 0` then the output tensor will have the shape `(N, A, B, C)`. if `axis == 1` then the output tensor will have the shape `(A, N, B, C)`. - Etc.

                    For example:

                    ```prettyprint + Etc.

                    For example:

                    ``` # x is [1, 4] # y is [2, 5] # z is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] - ```

                    This is the opposite of unpack.

                    pack'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> [Tensor v'1 t]

                    values: Must be of same shape and type.

                    -> Tensor Build t

                    output: The packed tensor.

                    pad

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
                    => Tensor v'1 t

                    input

                    -> Tensor v'2 tpaddings

                    paddings

                    -> Tensor Build t

                    output

                    Pads a tensor with zeros.

                    This operation pads a input with zeros according to the paddings you + ```

                    This is the opposite of unpack.

                    pack' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> [Tensor v'1 t]

                    values: Must be of same shape and type.

                    -> Tensor Build t

                    output: The packed tensor.

                    pad Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                    => Tensor v'1 t

                    input

                    -> Tensor v'2 tpaddings

                    paddings

                    -> Tensor Build t

                    output

                    Pads a tensor with zeros.

                    This operation pads a input with zeros according to the paddings you specify. paddings is an integer tensor with shape `[Dn, 2]`, where n is the rank of input. For each dimension D of input, `paddings[D, 0]` indicates how many zeros to add before the contents of input in that dimension, and `paddings[D, 1]` indicates how many zeros to add after the contents of input - in that dimension.

                    The padded size of each dimension D of the output is:

                    `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

                    For example:

                    ```prettyprint + in that dimension.

                    The padded size of each dimension D of the output is:

                    `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

                    For example:

                    ``` # t is [[1, 1], [2, 2]] # paddings is [[1, 1], [2, 2]] # rank of t is 2 @@ -1731,13 +1973,38 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [0, 0, 1, 1, 0, 0] [0, 0, 2, 2, 0, 0] [0, 0, 0, 0, 0, 0]] - ```

                    pad'

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor v'2 tpaddings

                    paddings

                    -> Tensor Build t

                    output

                    paddingFIFOQueue

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    A queue that produces elements in first-in first-out order.

                    Variable-size shapes are allowed by setting the corresponding shape dimensions + ```

                    pad' Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor v'2 tpaddings

                    paddings

                    -> Tensor Build t

                    output

                    padV2 Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                    => Tensor v'1 t

                    input

                    -> Tensor v'2 tpaddings

                    paddings

                    -> Tensor v'3 t

                    constant_values

                    -> Tensor Build t

                    output

                    Pads a tensor.

                    This operation pads input according to the paddings and constant_values + you specify. paddings is an integer tensor with shape `[Dn, 2]`, where n is + the rank of input. For each dimension D of input, `paddings[D, 0]` indicates + how many padding values to add before the contents of input in that dimension, + and `paddings[D, 1]` indicates how many padding values to add after the contents + of input in that dimension. constant_values is a scalar tensor of the same + type as input that indicates the value to use for padding input.

                    The padded size of each dimension D of the output is:

                    `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

                    For example:

                    ``` + # t is [[1, 1], [2, 2]] + # paddings is [[1, 1], [2, 2]] + # constant_values is 0 + # rank of t is 2 + pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + [0, 0, 1, 1, 0, 0] + [0, 0, 2, 2, 0, 0] + [0, 0, 0, 0, 0, 0]] + ```

                    padV2' Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor v'2 tpaddings

                    paddings

                    -> Tensor v'3 t

                    constant_values

                    -> Tensor Build t

                    output

                    paddedBatchDataset Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes toutput_types) 
                    => Tensor v'1 ResourceHandle

                    input_dataset

                    -> Tensor v'2 Int64

                    batch_size: A scalar representing the number of elements to accumulate in a + batch.

                    -> [Tensor v'3 Int64]

                    padded_shapes: A list of int64 tensors representing the desired padded shapes + of the corresponding output components. These shapes may be partially + specified, using `-1` to indicate that a particular dimension should be + padded to the maximum size of all batch elements.

                    -> TensorList v'4 toutput_types

                    padding_values: A list of scalars containing the padding value to use for + each of the outputs.

                    -> m' (Tensor Value ResourceHandle)

                    handle

                    Creates a dataset that batches and pads batch_size elements from the input.

                    paddedBatchDataset' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes toutput_types) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    input_dataset

                    -> Tensor v'2 Int64

                    batch_size: A scalar representing the number of elements to accumulate in a + batch.

                    -> [Tensor v'3 Int64]

                    padded_shapes: A list of int64 tensors representing the desired padded shapes + of the corresponding output components. These shapes may be partially + specified, using `-1` to indicate that a particular dimension should be + padded to the maximum size of all batch elements.

                    -> TensorList v'4 toutput_types

                    padding_values: A list of scalars containing the padding value to use for + each of the outputs.

                    -> m' (Tensor Value ResourceHandle)

                    handle

                    paddingFIFOQueue Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    A queue that produces elements in first-in first-out order.

                    Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum - size of any given element in the minibatch. See below for details.

                    paddingFIFOQueue'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    paddingFIFOQueueV2

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    component_types: The type of each component in a value.

                    -> m' ResourceHandle

                    handle: The handle to the queue.

                    A queue that produces elements in first-in first-out order.

                    Variable-size shapes are allowed by setting the corresponding shape dimensions + size of any given element in the minibatch. See below for details.

                    paddingFIFOQueue' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    paddingFIFOQueueV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Value ResourceHandle)

                    handle: The handle to the queue.

                    A queue that produces elements in first-in first-out order.

                    Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum - size of any given element in the minibatch. See below for details.

                    paddingFIFOQueueV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    component_types: The type of each component in a value.

                    -> m' ResourceHandle

                    handle: The handle to the queue.

                    parallelConcat

                    Arguments

                    :: TensorType t 
                    => Shape

                    shape: the final shape of the result; should be equal to the shapes of any input - but with the number of input values in the first dimension.

                    -> [Tensor v'1 t]

                    values: Tensors to be concatenated. All must have size 1 in the first dimension - and same shape.

                    -> Tensor Build t

                    output: The concatenated tensor.

                    Concatenates a list of N tensors along the first dimension.

                    The input tensors are all required to have size 1 in the first dimension.

                    For example:

                    ```prettyprint + size of any given element in the minibatch. See below for details.

                    paddingFIFOQueueV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Value ResourceHandle)

                    handle: The handle to the queue.

                    parallelConcat Source #

                    Arguments

                    :: TensorType t 
                    => Shape

                    shape: the final shape of the result; should be equal to the shapes of any input + but with the number of input values in the first dimension.

                    -> [Tensor v'1 t]

                    values: Tensors to be concatenated. All must have size 1 in the first dimension + and same shape.

                    -> Tensor Build t

                    output: The concatenated tensor.

                    Concatenates a list of N tensors along the first dimension.

                    The input tensors are all required to have size 1 in the first dimension.

                    For example:

                    ``` # x is [[1, 4]] # y is [[2, 5]] # z is [[3, 6]] @@ -1746,126 +2013,133 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core of the inputs be computed before the operation will begin but doesn't require that the input shapes be known during graph construction. Parallel concat will copy pieces of the input into the output as they become available, in - some situations this can provide a performance benefit.

                    parallelConcat'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Shape

                    shape: the final shape of the result; should be equal to the shapes of any input - but with the number of input values in the first dimension.

                    -> [Tensor v'1 t]

                    values: Tensors to be concatenated. All must have size 1 in the first dimension - and same shape.

                    -> Tensor Build t

                    output: The concatenated tensor.

                    parameterizedTruncatedNormal

                    Arguments

                    :: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
                    => Tensor v'1 t

                    shape: The shape of the output tensor. Batches are indexed by the 0th dimension.

                    -> Tensor v'2 dtype

                    means: The mean parameter of each batch.

                    -> Tensor v'3 dtype

                    stdevs: The standard deviation parameter of each batch. Must be greater than 0.

                    -> Tensor v'4 dtype

                    minvals: The minimum cutoff. May be -infinity.

                    -> Tensor v'5 dtype

                    maxvals: The maximum cutoff. May be +infinity, and must be more than the minval - for each batch.

                    -> m' (Tensor Value dtype)

                    output: A matrix of shape num_batches x samples_per_batch, filled with random + some situations this can provide a performance benefit.

                    parallelConcat' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Shape

                    shape: the final shape of the result; should be equal to the shapes of any input + but with the number of input values in the first dimension.

                    -> [Tensor v'1 t]

                    values: Tensors to be concatenated. All must have size 1 in the first dimension + and same shape.

                    -> Tensor Build t

                    output: The concatenated tensor.

                    parameterizedTruncatedNormal Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                    => Tensor v'1 t

                    shape: The shape of the output tensor. Batches are indexed by the 0th dimension.

                    -> Tensor v'2 dtype

                    means: The mean parameter of each batch.

                    -> Tensor v'3 dtype

                    stdevs: The standard deviation parameter of each batch. Must be greater than 0.

                    -> Tensor v'4 dtype

                    minvals: The minimum cutoff. May be -infinity.

                    -> Tensor v'5 dtype

                    maxvals: The maximum cutoff. May be +infinity, and must be more than the minval + for each batch.

                    -> m' (Tensor Value dtype)

                    output: A matrix of shape num_batches x samples_per_batch, filled with random truncated normal values using the parameters for each row.

                    Outputs random values from a normal distribution. The parameters may each be a

                    scalar which applies to the entire output, or a vector of length shape[0] which - stores the parameters for each batch.

                    parameterizedTruncatedNormal'

                    Arguments

                    :: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    shape: The shape of the output tensor. Batches are indexed by the 0th dimension.

                    -> Tensor v'2 dtype

                    means: The mean parameter of each batch.

                    -> Tensor v'3 dtype

                    stdevs: The standard deviation parameter of each batch. Must be greater than 0.

                    -> Tensor v'4 dtype

                    minvals: The minimum cutoff. May be -infinity.

                    -> Tensor v'5 dtype

                    maxvals: The maximum cutoff. May be +infinity, and must be more than the minval - for each batch.

                    -> m' (Tensor Value dtype)

                    output: A matrix of shape num_batches x samples_per_batch, filled with random - truncated normal values using the parameters for each row.

                    parseExample

                    Arguments

                    :: (OneOfs `[ByteString, Int64, Float]` sparse_types, OneOfs `[ByteString, Int64, Float]` tdense) 
                    => Tensor v'1 ByteString

                    serialized: A vector containing a batch of binary serialized Example protos.

                    -> Tensor v'2 ByteString

                    names: A vector containing the names of the serialized protos. + stores the parameters for each batch.

                    parameterizedTruncatedNormal' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    shape: The shape of the output tensor. Batches are indexed by the 0th dimension.

                    -> Tensor v'2 dtype

                    means: The mean parameter of each batch.

                    -> Tensor v'3 dtype

                    stdevs: The standard deviation parameter of each batch. Must be greater than 0.

                    -> Tensor v'4 dtype

                    minvals: The minimum cutoff. May be -infinity.

                    -> Tensor v'5 dtype

                    maxvals: The maximum cutoff. May be +infinity, and must be more than the minval + for each batch.

                    -> m' (Tensor Value dtype)

                    output: A matrix of shape num_batches x samples_per_batch, filled with random + truncated normal values using the parameters for each row.

                    parseExample Source #

                    Arguments

                    :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) 
                    => Tensor v'1 ByteString

                    serialized: A vector containing a batch of binary serialized Example protos.

                    -> Tensor v'2 ByteString

                    names: A vector containing the names of the serialized protos. May contain, for example, table key (descriptive) names for the corresponding serialized protos. These are purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no names are available. - If non-empty, this vector must be the same length as "serialized".

                    -> [Tensor v'3 ByteString]

                    sparse_keys: A list of Nsparse string Tensors (scalars). - The keys expected in the Examples' features associated with sparse values.

                    -> [Tensor v'4 ByteString]

                    dense_keys: A list of Ndense string Tensors (scalars). - The keys expected in the Examples' features associated with dense values.

                    -> TensorList v'5 tdense

                    dense_defaults: A list of Ndense Tensors (some may be empty). + If non-empty, this vector must be the same length as "serialized".

                    -> [Tensor v'3 ByteString]

                    sparse_keys: A list of Nsparse string Tensors (scalars). + The keys expected in the Examples' features associated with sparse values.

                    -> [Tensor v'4 ByteString]

                    dense_keys: A list of Ndense string Tensors (scalars). + The keys expected in the Examples' features associated with dense values.

                    -> TensorList v'5 tdense

                    dense_defaults: A list of Ndense Tensors (some may be empty). dense_defaults[j] provides default values when the example's feature_map lacks dense_key[j]. If an empty Tensor is provided for dense_defaults[j], then the Feature dense_keys[j] is required. The input type is inferred from dense_defaults[j], even when it's empty. - If dense_defaults[j] is not empty, its shape must match dense_shapes[j].

                    -> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense)

                    (sparse_indices, sparse_values, sparse_shapes, dense_values)

                    • sparse_indices
                    • sparse_values
                    • sparse_shapes
                    • dense_values

                    Transforms a vector of brain.Example protos (as strings) into typed tensors.

                    parseExample'

                    Arguments

                    :: (OneOfs `[ByteString, Int64, Float]` sparse_types, OneOfs `[ByteString, Int64, Float]` tdense) 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    serialized: A vector containing a batch of binary serialized Example protos.

                    -> Tensor v'2 ByteString

                    names: A vector containing the names of the serialized protos. + If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + then the shape of dense_defaults[j] must match that of dense_shapes[j]. + If dense_shapes[j] has an undefined major dimension (variable strides dense + feature), dense_defaults[j] must contain a single element: + the padding element.

                    -> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense)

                    (sparse_indices, sparse_values, sparse_shapes, dense_values)

                    • sparse_indices
                    • sparse_values
                    • sparse_shapes
                    • dense_values

                    Transforms a vector of brain.Example protos (as strings) into typed tensors.

                    parseExample' Source #

                    Arguments

                    :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    serialized: A vector containing a batch of binary serialized Example protos.

                    -> Tensor v'2 ByteString

                    names: A vector containing the names of the serialized protos. May contain, for example, table key (descriptive) names for the corresponding serialized protos. These are purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no names are available. - If non-empty, this vector must be the same length as "serialized".

                    -> [Tensor v'3 ByteString]

                    sparse_keys: A list of Nsparse string Tensors (scalars). - The keys expected in the Examples' features associated with sparse values.

                    -> [Tensor v'4 ByteString]

                    dense_keys: A list of Ndense string Tensors (scalars). - The keys expected in the Examples' features associated with dense values.

                    -> TensorList v'5 tdense

                    dense_defaults: A list of Ndense Tensors (some may be empty). + If non-empty, this vector must be the same length as "serialized".

                    -> [Tensor v'3 ByteString]

                    sparse_keys: A list of Nsparse string Tensors (scalars). + The keys expected in the Examples' features associated with sparse values.

                    -> [Tensor v'4 ByteString]

                    dense_keys: A list of Ndense string Tensors (scalars). + The keys expected in the Examples' features associated with dense values.

                    -> TensorList v'5 tdense

                    dense_defaults: A list of Ndense Tensors (some may be empty). dense_defaults[j] provides default values when the example's feature_map lacks dense_key[j]. If an empty Tensor is provided for dense_defaults[j], then the Feature dense_keys[j] is required. The input type is inferred from dense_defaults[j], even when it's empty. - If dense_defaults[j] is not empty, its shape must match dense_shapes[j].

                    -> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense)

                    (sparse_indices, sparse_values, sparse_shapes, dense_values)

                    • sparse_indices
                    • sparse_values
                    • sparse_shapes
                    • dense_values

                    parseSingleSequenceExample

                    Arguments

                    :: (OneOfs `[ByteString, Int64, Float]` context_sparse_types, OneOfs `[ByteString, Int64, Float]` tcontext_dense, OneOfs `[ByteString, Int64, Float]` feature_list_dense_types, OneOfs `[ByteString, Int64, Float]` feature_list_sparse_types) 
                    => Tensor v'1 ByteString

                    serialized: A scalar containing a binary serialized SequenceExample proto.

                    -> Tensor v'2 ByteString

                    feature_list_dense_missing_assumed_empty: A vector listing the + If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + then the shape of dense_defaults[j] must match that of dense_shapes[j]. + If dense_shapes[j] has an undefined major dimension (variable strides dense + feature), dense_defaults[j] must contain a single element: + the padding element.

                    -> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense)

                    (sparse_indices, sparse_values, sparse_shapes, dense_values)

                    • sparse_indices
                    • sparse_values
                    • sparse_shapes
                    • dense_values

                    parseSingleSequenceExample Source #

                    Arguments

                    :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) 
                    => Tensor v'1 ByteString

                    serialized: A scalar containing a binary serialized SequenceExample proto.

                    -> Tensor v'2 ByteString

                    feature_list_dense_missing_assumed_empty: A vector listing the FeatureList keys which may be missing from the SequenceExample. If the associated FeatureList is missing, it is treated as empty. By default, - any FeatureList not listed in this vector must exist in the SequenceExample.

                    -> [Tensor v'3 ByteString]

                    context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). + any FeatureList not listed in this vector must exist in the SequenceExample.

                    -> [Tensor v'3 ByteString]

                    context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). The keys expected in the Examples' features associated with context_sparse - values.

                    -> [Tensor v'4 ByteString]

                    context_dense_keys: A list of Ncontext_dense string Tensors (scalars). + values.

                    -> [Tensor v'4 ByteString]

                    context_dense_keys: A list of Ncontext_dense string Tensors (scalars). The keys expected in the SequenceExamples' context features associated with - dense values.

                    -> [Tensor v'5 ByteString]

                    feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors + dense values.

                    -> [Tensor v'5 ByteString]

                    feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors (scalars). The keys expected in the FeatureLists associated with sparse - values.

                    -> [Tensor v'6 ByteString]

                    feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). + values.

                    -> [Tensor v'6 ByteString]

                    feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). The keys expected in the SequenceExamples' feature_lists associated - with lists of dense values.

                    -> TensorList v'7 tcontext_dense

                    context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). + with lists of dense values.

                    -> TensorList v'7 tcontext_dense

                    context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). context_dense_defaults[j] provides default values when the SequenceExample's context map lacks context_dense_key[j]. If an empty Tensor is provided for context_dense_defaults[j], then the Feature context_dense_keys[j] is required. The input type is inferred from context_dense_defaults[j], even when it's empty. If context_dense_defaults[j] is not empty, its shape must match - context_dense_shapes[j].

                    -> Tensor v'8 ByteString

                    debug_name: A scalar containing the name of the serialized proto. + context_dense_shapes[j].

                    -> Tensor v'8 ByteString

                    debug_name: A scalar containing the name of the serialized proto. May contain, for example, table key (descriptive) name for the corresponding serialized proto. This is purely useful for debugging purposes, and the presence of values here has no effect on the output. - May also be an empty scalar if no name is available.

                    -> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types)

                    (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values)

                    • context_sparse_indices
                    • context_sparse_values
                    • context_sparse_shapes
                    • context_dense_values
                    • feature_list_sparse_indices
                    • feature_list_sparse_values
                    • feature_list_sparse_shapes
                    • feature_list_dense_values

                    Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.

                    parseSingleSequenceExample'

                    Arguments

                    :: (OneOfs `[ByteString, Int64, Float]` context_sparse_types, OneOfs `[ByteString, Int64, Float]` tcontext_dense, OneOfs `[ByteString, Int64, Float]` feature_list_dense_types, OneOfs `[ByteString, Int64, Float]` feature_list_sparse_types) 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    serialized: A scalar containing a binary serialized SequenceExample proto.

                    -> Tensor v'2 ByteString

                    feature_list_dense_missing_assumed_empty: A vector listing the + May also be an empty scalar if no name is available.

                    -> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types)

                    (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values)

                    • context_sparse_indices
                    • context_sparse_values
                    • context_sparse_shapes
                    • context_dense_values
                    • feature_list_sparse_indices
                    • feature_list_sparse_values
                    • feature_list_sparse_shapes
                    • feature_list_dense_values

                    Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.

                    parseSingleSequenceExample' Source #

                    Arguments

                    :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    serialized: A scalar containing a binary serialized SequenceExample proto.

                    -> Tensor v'2 ByteString

                    feature_list_dense_missing_assumed_empty: A vector listing the FeatureList keys which may be missing from the SequenceExample. If the associated FeatureList is missing, it is treated as empty. By default, - any FeatureList not listed in this vector must exist in the SequenceExample.

                    -> [Tensor v'3 ByteString]

                    context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). + any FeatureList not listed in this vector must exist in the SequenceExample.

                    -> [Tensor v'3 ByteString]

                    context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). The keys expected in the Examples' features associated with context_sparse - values.

                    -> [Tensor v'4 ByteString]

                    context_dense_keys: A list of Ncontext_dense string Tensors (scalars). + values.

                    -> [Tensor v'4 ByteString]

                    context_dense_keys: A list of Ncontext_dense string Tensors (scalars). The keys expected in the SequenceExamples' context features associated with - dense values.

                    -> [Tensor v'5 ByteString]

                    feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors + dense values.

                    -> [Tensor v'5 ByteString]

                    feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors (scalars). The keys expected in the FeatureLists associated with sparse - values.

                    -> [Tensor v'6 ByteString]

                    feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). + values.

                    -> [Tensor v'6 ByteString]

                    feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). The keys expected in the SequenceExamples' feature_lists associated - with lists of dense values.

                    -> TensorList v'7 tcontext_dense

                    context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). + with lists of dense values.

                    -> TensorList v'7 tcontext_dense

                    context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). context_dense_defaults[j] provides default values when the SequenceExample's context map lacks context_dense_key[j]. If an empty Tensor is provided for context_dense_defaults[j], then the Feature context_dense_keys[j] is required. The input type is inferred from context_dense_defaults[j], even when it's empty. If context_dense_defaults[j] is not empty, its shape must match - context_dense_shapes[j].

                    -> Tensor v'8 ByteString

                    debug_name: A scalar containing the name of the serialized proto. + context_dense_shapes[j].

                    -> Tensor v'8 ByteString

                    debug_name: A scalar containing the name of the serialized proto. May contain, for example, table key (descriptive) name for the corresponding serialized proto. This is purely useful for debugging purposes, and the presence of values here has no effect on the output. - May also be an empty scalar if no name is available.

                    -> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types)

                    (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values)

                    • context_sparse_indices
                    • context_sparse_values
                    • context_sparse_shapes
                    • context_dense_values
                    • feature_list_sparse_indices
                    • feature_list_sparse_values
                    • feature_list_sparse_shapes
                    • feature_list_dense_values

                    parseTensor

                    Arguments

                    :: TensorType out_type 
                    => Tensor v'1 ByteString

                    serialized: A scalar string containing a serialized TensorProto proto.

                    -> Tensor Build out_type

                    output: A Tensor of type out_type.

                    Transforms a serialized tensorflow.TensorProto proto into a Tensor.

                    parseTensor'

                    Arguments

                    :: TensorType out_type 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    serialized: A scalar string containing a serialized TensorProto proto.

                    -> Tensor Build out_type

                    output: A Tensor of type out_type.

                    placeholder

                    Arguments

                    :: TensorType dtype 
                    => Tensor Build dtype

                    output: A placeholder tensor that must be replaced using the feed mechanism.

                    A placeholder op for a value that will be fed into the computation.

                    N.B. This operation will fail with an error if it is executed. It is + May also be an empty scalar if no name is available.

                    -> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types)

                    (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values)

                    • context_sparse_indices
                    • context_sparse_values
                    • context_sparse_shapes
                    • context_dense_values
                    • feature_list_sparse_indices
                    • feature_list_sparse_values
                    • feature_list_sparse_shapes
                    • feature_list_dense_values

                    parseTensor Source #

                    Arguments

                    :: TensorType out_type 
                    => Tensor v'1 ByteString

                    serialized: A scalar string containing a serialized TensorProto proto.

                    -> Tensor Build out_type

                    output: A Tensor of type out_type.

                    Transforms a serialized tensorflow.TensorProto proto into a Tensor.

                    parseTensor' Source #

                    Arguments

                    :: TensorType out_type 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    serialized: A scalar string containing a serialized TensorProto proto.

                    -> Tensor Build out_type

                    output: A Tensor of type out_type.

                    placeholder Source #

                    Arguments

                    :: TensorType dtype 
                    => Tensor Build dtype

                    output: A placeholder tensor that must be replaced using the feed mechanism.

                    A placeholder op for a value that will be fed into the computation.

                    N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to - provide attrs that enable the fed value to be checked at runtime.

                    placeholder'

                    Arguments

                    :: TensorType dtype 
                    => OpParams 
                    -> Tensor Build dtype

                    output: A placeholder tensor that must be replaced using the feed mechanism.

                    placeholderV2

                    Arguments

                    :: TensorType dtype 
                    => Shape

                    shape: The shape of the tensor. The shape can be any partially-specified - shape. To be unconstrained, pass in a shape with unknown rank.

                    -> Tensor Build dtype

                    output: A placeholder tensor that must be replaced using the feed mechanism.

                    A placeholder op for a value that will be fed into the computation.

                    N.B. This operation will fail with an error if it is executed. It is + provide attrs that enable the fed value to be checked at runtime.

                    placeholder' Source #

                    Arguments

                    :: TensorType dtype 
                    => OpParams 
                    -> Tensor Build dtype

                    output: A placeholder tensor that must be replaced using the feed mechanism.

                    placeholderV2 Source #

                    Arguments

                    :: TensorType dtype 
                    => Shape

                    shape: The shape of the tensor. The shape can be any partially-specified + shape. To be unconstrained, pass in a shape with unknown rank.

                    -> Tensor Build dtype

                    output: A placeholder tensor that must be replaced using the feed mechanism.

                    A placeholder op for a value that will be fed into the computation.

                    N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to - provide attrs that enable the fed value to be checked at runtime.

                    placeholderV2'

                    Arguments

                    :: TensorType dtype 
                    => OpParams 
                    -> Shape

                    shape: The shape of the tensor. The shape can be any partially-specified - shape. To be unconstrained, pass in a shape with unknown rank.

                    -> Tensor Build dtype

                    output: A placeholder tensor that must be replaced using the feed mechanism.

                    placeholderWithDefault

                    Arguments

                    :: TensorType dtype 
                    => Shape

                    shape: The (possibly partial) shape of the tensor.

                    -> Tensor v'1 dtype

                    input: The default value to produce when output is not fed.

                    -> Tensor Build dtype

                    output: A placeholder tensor that defaults to input if it is not fed.

                    A placeholder op that passes through input when its output is not fed.

                    placeholderWithDefault'

                    Arguments

                    :: TensorType dtype 
                    => OpParams 
                    -> Shape

                    shape: The (possibly partial) shape of the tensor.

                    -> Tensor v'1 dtype

                    input: The default value to produce when output is not fed.

                    -> Tensor Build dtype

                    output: A placeholder tensor that defaults to input if it is not fed.

                    polygamma

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => Tensor v'1 t

                    a

                    -> Tensor v'2 t

                    x

                    -> Tensor Build t

                    z

                    Compute the polygamma function \(psi^{(n)}(x)\).

                    The polygamma function is defined as:

                    ``` - psi^{(n)}(x) = frac{d^n}{dx^n} psi(x) - ``` - where \(psi(x)\) is the digamma function.

                    polygamma'

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    a

                    -> Tensor v'2 t

                    x

                    -> Tensor Build t

                    z

                    pow

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Computes the power of one value to another.

                    Given a tensor x and a tensor y, this operation computes \(x^y\) for + provide attrs that enable the fed value to be checked at runtime.

                    placeholderV2' Source #

                    Arguments

                    :: TensorType dtype 
                    => OpParams 
                    -> Shape

                    shape: The shape of the tensor. The shape can be any partially-specified + shape. To be unconstrained, pass in a shape with unknown rank.

                    -> Tensor Build dtype

                    output: A placeholder tensor that must be replaced using the feed mechanism.

                    placeholderWithDefault Source #

                    Arguments

                    :: TensorType dtype 
                    => Shape

                    shape: The (possibly partial) shape of the tensor.

                    -> Tensor v'1 dtype

                    input: The default value to produce when output is not fed.

                    -> Tensor Build dtype

                    output: A placeholder tensor that defaults to input if it is not fed.

                    A placeholder op that passes through input when its output is not fed.

                    placeholderWithDefault' Source #

                    Arguments

                    :: TensorType dtype 
                    => OpParams 
                    -> Shape

                    shape: The (possibly partial) shape of the tensor.

                    -> Tensor v'1 dtype

                    input: The default value to produce when output is not fed.

                    -> Tensor Build dtype

                    output: A placeholder tensor that defaults to input if it is not fed.

                    polygamma Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => Tensor v'1 t

                    a

                    -> Tensor v'2 t

                    x

                    -> Tensor Build t

                    z

                    Compute the polygamma function \(\psi^{(n)}(x)\).

                    The polygamma function is defined as:

                    \(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\)

                    where \(\psi(x)\) is the digamma function.

                    polygamma' Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    a

                    -> Tensor v'2 t

                    x

                    -> Tensor Build t

                    z

                    pow Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Computes the power of one value to another.

                    Given a tensor x and a tensor y, this operation computes \(x^y\) for corresponding elements in x and y. For example:

                    ``` # tensor x is [[2, 2], [3, 3]] # tensor y is [[8, 16], [2, 3]] tf.pow(x, y) ==> [[256, 65536], [9, 27]] ```

                    pow'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    preventGradient

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input

                    -> Tensor Build t

                    output

                    An identity op that triggers an error if a gradient is requested.

                    When executed in a graph, this op outputs its input tensor as-is.

                    When building ops to compute gradients, the TensorFlow gradient system + ```

                    pow' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    preventGradient Source #

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input: any tensor.

                    -> Tensor Build t

                    output: the same input tensor.

                    An identity op that triggers an error if a gradient is requested.

                    When executed in a graph, this op outputs its input tensor as-is.

                    When building ops to compute gradients, the TensorFlow gradient system will return an error when trying to lookup the gradient of this op, because no gradient must ever be registered for this function. This op exists to prevent subtle bugs from silently returning unimplemented - gradients in some corner cases.

                    preventGradient'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor Build t

                    output

                    print

                    Arguments

                    :: (MonadBuild m', TensorType t, TensorTypes u) 
                    => Tensor v'1 t

                    input: The tensor passed to output

                    -> TensorList v'2 u

                    data: A list of tensors to print out when op is evaluated.

                    -> m' (Tensor Value t)

                    output: = The unmodified input tensor

                    Prints a list of tensors.

                    Passes input through to output and prints `data` when evaluating.

                    print'

                    Arguments

                    :: (MonadBuild m', TensorType t, TensorTypes u) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor passed to output

                    -> TensorList v'2 u

                    data: A list of tensors to print out when op is evaluated.

                    -> m' (Tensor Value t)

                    output: = The unmodified input tensor

                    priorityQueue

                    Arguments

                    :: MonadBuild m' 
                    => m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    A queue that produces elements sorted by the first component value.

                    Note that the PriorityQueue requires the first component of any element + gradients in some corner cases.

                    preventGradient' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: any tensor.

                    -> Tensor Build t

                    output: the same input tensor.

                    print Source #

                    Arguments

                    :: (MonadBuild m', TensorType t, TensorTypes u) 
                    => Tensor v'1 t

                    input: The tensor passed to output

                    -> TensorList v'2 u

                    data: A list of tensors to print out when op is evaluated.

                    -> m' (Tensor Value t)

                    output: = The unmodified input tensor

                    Prints a list of tensors.

                    Passes input through to output and prints `data` when evaluating.

                    print' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t, TensorTypes u) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor passed to output

                    -> TensorList v'2 u

                    data: A list of tensors to print out when op is evaluated.

                    -> m' (Tensor Value t)

                    output: = The unmodified input tensor

                    priorityQueue Source #

                    Arguments

                    :: MonadBuild m' 
                    => m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    A queue that produces elements sorted by the first component value.

                    Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra - entry in their input (resp. output) lists.

                    priorityQueue'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    priorityQueueV2

                    Arguments

                    :: MonadBuild m' 
                    => m' ResourceHandle

                    handle: The handle to the queue.

                    A queue that produces elements sorted by the first component value.

                    Note that the PriorityQueue requires the first component of any element + entry in their input (resp. output) lists.

                    priorityQueue' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    priorityQueueV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => m' (Tensor Value ResourceHandle)

                    handle: The handle to the queue.

                    A queue that produces elements sorted by the first component value.

                    Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra - entry in their input (resp. output) lists.

                    priorityQueueV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> m' ResourceHandle

                    handle: The handle to the queue.

                    prod

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                    => Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    Computes the product of elements across dimensions of a tensor.

                    Reduces input along the dimensions given in reduction_indices. Unless + entry in their input (resp. output) lists.

                    priorityQueueV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> m' (Tensor Value ResourceHandle)

                    handle: The handle to the queue.

                    prod Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                    => Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    Computes the product of elements across dimensions of a tensor.

                    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

                    prod'

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    qr

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Double, Float]` t 
                    => Tensor v'1 t

                    input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions - form matrices of size `[M, N]`. Let P be the minimum of M and N.

                    -> (Tensor Build t, Tensor Build t)

                    (q, r)

                    • q: Orthonormal basis for range of a. If full_matrices is False then - shape is `[..., M, P]`; if full_matrices is True then shape is - `[..., M, M]`.
                    • r: Triangular factor. If full_matrices is False then shape is - `[..., P, N]`. If full_matrices is True then shape is `[..., M, N]`.

                    Computes the QR decompositions of one or more matrices.

                    Computes the QR decomposition of each inner matrix in tensor such that - `tensor[..., :, :] = q[..., :, :] * r[..., :,:])`

                    ```prettyprint + retained with length 1.

                    prod' Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The tensor to reduce.

                    -> Tensor v'2 tidx

                    reduction_indices: The dimensions to reduce.

                    -> Tensor Build t

                    output: The reduced tensor.

                    qr Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => Tensor v'1 t

                    input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + form matrices of size `[M, N]`. Let P be the minimum of M and N.

                    -> (Tensor Build t, Tensor Build t)

                    (q, r)

                    • q: Orthonormal basis for range of a. If full_matrices is False then + shape is `[..., M, P]`; if full_matrices is True then shape is + `[..., M, M]`.
                    • r: Triangular factor. If full_matrices is False then shape is + `[..., P, N]`. If full_matrices is True then shape is `[..., M, N]`.

                    Computes the QR decompositions of one or more matrices.

                    Computes the QR decomposition of each inner matrix in tensor such that + `tensor[..., :, :] = q[..., :, :] * r[..., :,:])`

                    ```python # a is a tensor. # q is a tensor of orthonormal matrices. # r is a tensor of upper triangular matrices. q, r = qr(a) q_full, r_full = qr(a, full_matrices=True) - ```

                    qr'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions - form matrices of size `[M, N]`. Let P be the minimum of M and N.

                    -> (Tensor Build t, Tensor Build t)

                    (q, r)

                    • q: Orthonormal basis for range of a. If full_matrices is False then - shape is `[..., M, P]`; if full_matrices is True then shape is - `[..., M, M]`.
                    • r: Triangular factor. If full_matrices is False then shape is - `[..., P, N]`. If full_matrices is True then shape is `[..., M, N]`.

                    quantizeAndDequantize

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => Tensor v'1 t

                    input: Tensor to quantize and then dequantize.

                    -> Tensor Build t

                    output

                    Quantizes then dequantizes a tensor.

                    This op simulates the precision loss from the quantized forward pass by: + ```

                    qr' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + form matrices of size `[M, N]`. Let P be the minimum of M and N.

                    -> (Tensor Build t, Tensor Build t)

                    (q, r)

                    • q: Orthonormal basis for range of a. If full_matrices is False then + shape is `[..., M, P]`; if full_matrices is True then shape is + `[..., M, M]`.
                    • r: Triangular factor. If full_matrices is False then shape is + `[..., P, N]`. If full_matrices is True then shape is `[..., M, N]`.

                    quantizeAndDequantize Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => Tensor v'1 t

                    input

                    -> Tensor Build t

                    output

                    Use QuantizeAndDequantizeV2 instead.

                    quantizeAndDequantize' Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor Build t

                    output

                    quantizeAndDequantizeV2 Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => Tensor v'1 t

                    input: Tensor to quantize and then dequantize.

                    -> Tensor v'2 t

                    input_min: If range_given, this is the min of the range, otherwise this input + will be ignored.

                    -> Tensor v'3 t

                    input_max: If range_given, this is the max of the range, otherwise this input + will be ignored.

                    -> Tensor Build t

                    output

                    Quantizes then dequantizes a tensor.

                    This op simulates the precision loss from the quantized forward pass by: 1. Quantizing the tensor to fixed point numbers, which should match the target quantization method when it is used in inference. 2. Dequantizing it back to floating point numbers for the following ops, most @@ -1873,13 +2147,16 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core of the output type, choosing to elide the lowest possible value for symmetry (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to 0.

                    To perform this op, we first find the range of values in our tensor. The range - we use is always centered on 0, so we find m such that

                    1. m = max(abs(input_min), abs(input_max)) if range_given is true,
                    2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.

                    Our input tensor range is then [-m, m].

                    Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed]. + we use is always centered on 0, so we find m such that

                    1. m = max(abs(input_min), abs(input_max)) if range_given is true,
                    2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.

                    Our input tensor range is then [-m, m].

                    Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed]. If signed_input is true, this is

                    min_fixed, max_fixed
                    =
                    -(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1
                    .

                    Otherwise, if signed_input is false, the fixed-point range is

                    min_fixed, max_fixed
                    = [0, (1 << num_bits) - 1].

                    From this we compute our scaling factor, s:

                    s = (max_fixed - min_fixed) / (2 * m).

                    Now we can quantize and dequantize the elements of our tensor. An element e is transformed into e':

                    e' = (e * s).round_to_nearest() / s.

                    Note that we have a different number of buckets in the signed vs. unsigned cases. For example, if num_bits == 8, we get 254 buckets in the signed case vs. 255 in the unsigned case.

                    For example, suppose num_bits = 8 and m = 1. Then

                    min_fixed, max_fixed
                    = [-127, 127], and s = (127 + 127) / 2 = 127.

                    Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.

                    quantizeAndDequantize'

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Tensor to quantize and then dequantize.

                    -> Tensor Build t

                    output

                    quantizeDownAndShrinkRange

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: The float value that the minimum quantized output value represents.
                    • output_max: The float value that the maximum quantized output value represents.

                    Convert the quantized input tensor into a lower-precision output, using the

                    actual distribution of the values to maximize the usage of the lower bit depth + {-127, -63, 0, 38}, and dequantized to {-1, -63.0127, 0, 38.0127}.

                    quantizeAndDequantizeV2' Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: Tensor to quantize and then dequantize.

                    -> Tensor v'2 t

                    input_min: If range_given, this is the min of the range, otherwise this input + will be ignored.

                    -> Tensor v'3 t

                    input_max: If range_given, this is the max of the range, otherwise this input + will be ignored.

                    -> Tensor Build t

                    output

                    quantizeAndDequantizeV3 Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => Tensor v'1 t

                    input

                    -> Tensor v'2 t

                    input_min

                    -> Tensor v'3 t

                    input_max

                    -> Tensor v'4 Int32

                    num_bits

                    -> Tensor Build t

                    output

                    Quantizes then dequantizes a tensor.

                    This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a + tensor, so its value can change during training.

                    quantizeAndDequantizeV3' Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor v'2 t

                    input_min

                    -> Tensor v'3 t

                    input_max

                    -> Tensor v'4 Int32

                    num_bits

                    -> Tensor Build t

                    output

                    quantizeDownAndShrinkRange Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: The float value that the minimum quantized output value represents.
                    • output_max: The float value that the maximum quantized output value represents.

                    Convert the quantized input tensor into a lower-precision output, using the

                    actual distribution of the values to maximize the usage of the lower bit depth and adjusting the output min and max ranges accordingly.

                    input_min, input_max
                    are scalar floats that specify the range for the float interpretation of the input data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 @@ -1894,7 +2171,7 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core may have large potential output ranges, but in practice have a distribution of input values that only uses a small fraction of the possible range. By feeding that output into this operator, we can reduce it from 32 bits down to 8 with - minimal loss of accuracy.

                    quantizeDownAndShrinkRange'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: The float value that the minimum quantized output value represents.
                    • output_max: The float value that the maximum quantized output value represents.

                    quantizeV2

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` t 
                    => Tensor v'1 Float

                    input

                    -> Tensor v'2 Float

                    min_range: The minimum scalar value possibly produced for the input.

                    -> Tensor v'3 Float

                    max_range: The maximum scalar value possibly produced for the input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output: The quantized data produced from the float input.
                    • output_min: The actual minimum scalar value used for the output.
                    • output_max: The actual maximum scalar value used for the output.

                    Quantize the input tensor of type float to output tensor of type T.

                    min_range, max_range
                    are scalar floats that specify the range for + minimal loss of accuracy.

                    quantizeDownAndShrinkRange' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: The float value that the minimum quantized output value represents.
                    • output_max: The float value that the maximum quantized output value represents.

                    quantizeV2 Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] t 
                    => Tensor v'1 Float

                    input

                    -> Tensor v'2 Float

                    min_range: The minimum scalar value possibly produced for the input.

                    -> Tensor v'3 Float

                    max_range: The maximum scalar value possibly produced for the input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output: The quantized data produced from the float input.
                    • output_min: The actual minimum scalar value used for the output.
                    • output_max: The actual maximum scalar value used for the output.

                    Quantize the input tensor of type float to output tensor of type T.

                    min_range, max_range
                    are scalar floats that specify the range for the input data. The mode attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.

                    In MIN_COMBINED mode, each value of the tensor will undergo the following:

                    ``` out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) @@ -1924,179 +2201,250 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core they will be separated by a small epsilon value to prevent ill-formed quantized buffers from being created. Otherwise, you can end up with buffers where all the quantized values map to the same float value, which causes problems for - operations that have to perform further calculations on them.

                    quantizeV2'

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` t 
                    => OpParams 
                    -> Tensor v'1 Float

                    input

                    -> Tensor v'2 Float

                    min_range: The minimum scalar value possibly produced for the input.

                    -> Tensor v'3 Float

                    max_range: The maximum scalar value possibly produced for the input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output: The quantized data produced from the float input.
                    • output_min: The actual minimum scalar value used for the output.
                    • output_max: The actual maximum scalar value used for the output.

                    quantizedAvgPool

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` t 
                    => Tensor v'1 t

                    input: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'3 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    Produces the average pool of the input tensor for quantized types.

                    quantizedAvgPool'

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'3 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    quantizedBatchNormWithGlobalNormalization

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => Bool

                    scale_after_normalization: A bool indicating whether the resulted tensor - needs to be multiplied with gamma.

                    -> Float

                    variance_epsilon: A small float number to avoid dividing by 0.

                    -> Tensor v'1 tinput

                    t: A 4D input Tensor.

                    -> Tensor v'2 Float

                    t_min: The value represented by the lowest quantized input.

                    -> Tensor v'3 Float

                    t_max: The value represented by the highest quantized input.

                    -> Tensor v'4 tinput

                    m: A 1D mean Tensor with size matching the last dimension of t. + operations that have to perform further calculations on them.

                    quantizeV2' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] t 
                    => OpParams 
                    -> Tensor v'1 Float

                    input

                    -> Tensor v'2 Float

                    min_range: The minimum scalar value possibly produced for the input.

                    -> Tensor v'3 Float

                    max_range: The maximum scalar value possibly produced for the input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output: The quantized data produced from the float input.
                    • output_min: The actual minimum scalar value used for the output.
                    • output_max: The actual maximum scalar value used for the output.

                    quantizedAdd Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) 
                    => Tensor v'1 t1

                    x

                    -> Tensor v'2 t2

                    y

                    -> Tensor v'3 Float

                    min_x: The float value that the lowest quantized x value represents.

                    -> Tensor v'4 Float

                    max_x: The float value that the highest quantized x value represents.

                    -> Tensor v'5 Float

                    min_y: The float value that the lowest quantized y value represents.

                    -> Tensor v'6 Float

                    max_y: The float value that the highest quantized y value represents.

                    -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

                    (z, min_z, max_z)

                    • z
                    • min_z: The float value that the lowest quantized output value represents.
                    • max_z: The float value that the highest quantized output value represents.
                    • NOTE*: QuantizedAdd supports limited forms of broadcasting. More about + broadcasting here

                    Returns x + y element-wise, working on quantized buffers.

                    quantizedAdd' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) 
                    => OpParams 
                    -> Tensor v'1 t1

                    x

                    -> Tensor v'2 t2

                    y

                    -> Tensor v'3 Float

                    min_x: The float value that the lowest quantized x value represents.

                    -> Tensor v'4 Float

                    max_x: The float value that the highest quantized x value represents.

                    -> Tensor v'5 Float

                    min_y: The float value that the lowest quantized y value represents.

                    -> Tensor v'6 Float

                    max_y: The float value that the highest quantized y value represents.

                    -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

                    (z, min_z, max_z)

                    • z
                    • min_z: The float value that the lowest quantized output value represents.
                    • max_z: The float value that the highest quantized output value represents.
                    • NOTE*: QuantizedAdd supports limited forms of broadcasting. More about + broadcasting here

                    quantizedAvgPool Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] t 
                    => Tensor v'1 t

                    input: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'3 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    Produces the average pool of the input tensor for quantized types.

                    quantizedAvgPool' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'3 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    quantizedBatchNormWithGlobalNormalization Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => Bool

                    scale_after_normalization: A bool indicating whether the resulted tensor + needs to be multiplied with gamma.

                    -> Float

                    variance_epsilon: A small float number to avoid dividing by 0.

                    -> Tensor v'1 tinput

                    t: A 4D input Tensor.

                    -> Tensor v'2 Float

                    t_min: The value represented by the lowest quantized input.

                    -> Tensor v'3 Float

                    t_max: The value represented by the highest quantized input.

                    -> Tensor v'4 tinput

                    m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, - or a saved moving average thereof.

                    -> Tensor v'5 Float

                    m_min: The value represented by the lowest quantized mean.

                    -> Tensor v'6 Float

                    m_max: The value represented by the highest quantized mean.

                    -> Tensor v'7 tinput

                    v: A 1D variance Tensor with size matching the last dimension of t. + or a saved moving average thereof.

                    -> Tensor v'5 Float

                    m_min: The value represented by the lowest quantized mean.

                    -> Tensor v'6 Float

                    m_max: The value represented by the highest quantized mean.

                    -> Tensor v'7 tinput

                    v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, - or a saved moving average thereof.

                    -> Tensor v'8 Float

                    v_min: The value represented by the lowest quantized variance.

                    -> Tensor v'9 Float

                    v_max: The value represented by the highest quantized variance.

                    -> Tensor v'10 tinput

                    beta: A 1D beta Tensor with size matching the last dimension of t. - An offset to be added to the normalized tensor.

                    -> Tensor v'11 Float

                    beta_min: The value represented by the lowest quantized offset.

                    -> Tensor v'12 Float

                    beta_max: The value represented by the highest quantized offset.

                    -> Tensor v'13 tinput

                    gamma: A 1D gamma Tensor with size matching the last dimension of t. + or a saved moving average thereof.

                    -> Tensor v'8 Float

                    v_min: The value represented by the lowest quantized variance.

                    -> Tensor v'9 Float

                    v_max: The value represented by the highest quantized variance.

                    -> Tensor v'10 tinput

                    beta: A 1D beta Tensor with size matching the last dimension of t. + An offset to be added to the normalized tensor.

                    -> Tensor v'11 Float

                    beta_min: The value represented by the lowest quantized offset.

                    -> Tensor v'12 Float

                    beta_max: The value represented by the highest quantized offset.

                    -> Tensor v'13 tinput

                    gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied - with the normalized tensor.

                    -> Tensor v'14 Float

                    gamma_min: The value represented by the lowest quantized gamma.

                    -> Tensor v'15 Float

                    gamma_max: The value represented by the highest quantized gamma.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (result, result_min, result_max)

                    • result
                    • result_min
                    • result_max

                    Quantized Batch normalization.

                    This op is deprecated and will be removed in the future. Prefer - `tf.nn.batch_normalization`.

                    quantizedBatchNormWithGlobalNormalization'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => OpParams 
                    -> Bool

                    scale_after_normalization: A bool indicating whether the resulted tensor - needs to be multiplied with gamma.

                    -> Float

                    variance_epsilon: A small float number to avoid dividing by 0.

                    -> Tensor v'1 tinput

                    t: A 4D input Tensor.

                    -> Tensor v'2 Float

                    t_min: The value represented by the lowest quantized input.

                    -> Tensor v'3 Float

                    t_max: The value represented by the highest quantized input.

                    -> Tensor v'4 tinput

                    m: A 1D mean Tensor with size matching the last dimension of t. + with the normalized tensor.

                    -> Tensor v'14 Float

                    gamma_min: The value represented by the lowest quantized gamma.

                    -> Tensor v'15 Float

                    gamma_max: The value represented by the highest quantized gamma.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (result, result_min, result_max)

                    • result
                    • result_min
                    • result_max

                    Quantized Batch normalization.

                    This op is deprecated and will be removed in the future. Prefer + `tf.nn.batch_normalization`.

                    quantizedBatchNormWithGlobalNormalization' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => OpParams 
                    -> Bool

                    scale_after_normalization: A bool indicating whether the resulted tensor + needs to be multiplied with gamma.

                    -> Float

                    variance_epsilon: A small float number to avoid dividing by 0.

                    -> Tensor v'1 tinput

                    t: A 4D input Tensor.

                    -> Tensor v'2 Float

                    t_min: The value represented by the lowest quantized input.

                    -> Tensor v'3 Float

                    t_max: The value represented by the highest quantized input.

                    -> Tensor v'4 tinput

                    m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, - or a saved moving average thereof.

                    -> Tensor v'5 Float

                    m_min: The value represented by the lowest quantized mean.

                    -> Tensor v'6 Float

                    m_max: The value represented by the highest quantized mean.

                    -> Tensor v'7 tinput

                    v: A 1D variance Tensor with size matching the last dimension of t. + or a saved moving average thereof.

                    -> Tensor v'5 Float

                    m_min: The value represented by the lowest quantized mean.

                    -> Tensor v'6 Float

                    m_max: The value represented by the highest quantized mean.

                    -> Tensor v'7 tinput

                    v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, - or a saved moving average thereof.

                    -> Tensor v'8 Float

                    v_min: The value represented by the lowest quantized variance.

                    -> Tensor v'9 Float

                    v_max: The value represented by the highest quantized variance.

                    -> Tensor v'10 tinput

                    beta: A 1D beta Tensor with size matching the last dimension of t. - An offset to be added to the normalized tensor.

                    -> Tensor v'11 Float

                    beta_min: The value represented by the lowest quantized offset.

                    -> Tensor v'12 Float

                    beta_max: The value represented by the highest quantized offset.

                    -> Tensor v'13 tinput

                    gamma: A 1D gamma Tensor with size matching the last dimension of t. + or a saved moving average thereof.

                    -> Tensor v'8 Float

                    v_min: The value represented by the lowest quantized variance.

                    -> Tensor v'9 Float

                    v_max: The value represented by the highest quantized variance.

                    -> Tensor v'10 tinput

                    beta: A 1D beta Tensor with size matching the last dimension of t. + An offset to be added to the normalized tensor.

                    -> Tensor v'11 Float

                    beta_min: The value represented by the lowest quantized offset.

                    -> Tensor v'12 Float

                    beta_max: The value represented by the highest quantized offset.

                    -> Tensor v'13 tinput

                    gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied - with the normalized tensor.

                    -> Tensor v'14 Float

                    gamma_min: The value represented by the lowest quantized gamma.

                    -> Tensor v'15 Float

                    gamma_max: The value represented by the highest quantized gamma.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (result, result_min, result_max)

                    • result
                    • result_min
                    • result_max

                    quantizedBiasAdd

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => Tensor v'1 t1

                    input

                    -> Tensor v'2 t2

                    bias: A 1D bias Tensor with size matching the last dimension of input.

                    -> Tensor v'3 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'4 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> Tensor v'5 Float

                    min_bias: The float value that the lowest quantized bias value represents.

                    -> Tensor v'6 Float

                    max_bias: The float value that the highest quantized bias value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, min_out, max_out)

                    • output
                    • min_out: The float value that the lowest quantized output value represents.
                    • max_out: The float value that the highest quantized output value represents.

                    Adds Tensor bias to Tensor input for Quantized types.

                    Broadcasts the values of bias on dimensions 0..N-2 of input.

                    quantizedBiasAdd'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => OpParams 
                    -> Tensor v'1 t1

                    input

                    -> Tensor v'2 t2

                    bias: A 1D bias Tensor with size matching the last dimension of input.

                    -> Tensor v'3 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'4 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> Tensor v'5 Float

                    min_bias: The float value that the lowest quantized bias value represents.

                    -> Tensor v'6 Float

                    max_bias: The float value that the highest quantized bias value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, min_out, max_out)

                    • output
                    • min_out: The float value that the lowest quantized output value represents.
                    • max_out: The float value that the highest quantized output value represents.

                    quantizedConcat

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 Int32

                    concat_dim: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

                    -> [Tensor v'2 t]

                    values: The N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

                    -> [Tensor v'3 Float]

                    input_mins: The minimum scalar values for each of the input tensors.

                    -> [Tensor v'4 Float]

                    input_maxes: The maximum scalar values for each of the input tensors.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output: A Tensor with the concatenation of values stacked along the + with the normalized tensor.

                    -> Tensor v'14 Float

                    gamma_min: The value represented by the lowest quantized gamma.

                    -> Tensor v'15 Float

                    gamma_max: The value represented by the highest quantized gamma.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (result, result_min, result_max)

                    • result
                    • result_min
                    • result_max

                    quantizedBiasAdd Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => Tensor v'1 t1

                    input

                    -> Tensor v'2 t2

                    bias: A 1D bias Tensor with size matching the last dimension of input.

                    -> Tensor v'3 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'4 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> Tensor v'5 Float

                    min_bias: The float value that the lowest quantized bias value represents.

                    -> Tensor v'6 Float

                    max_bias: The float value that the highest quantized bias value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, min_out, max_out)

                    • output
                    • min_out: The float value that the lowest quantized output value represents.
                    • max_out: The float value that the highest quantized output value represents.

                    Adds Tensor bias to Tensor input for Quantized types.

                    Broadcasts the values of bias on dimensions 0..N-2 of input.

                    quantizedBiasAdd' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => OpParams 
                    -> Tensor v'1 t1

                    input

                    -> Tensor v'2 t2

                    bias: A 1D bias Tensor with size matching the last dimension of input.

                    -> Tensor v'3 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'4 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> Tensor v'5 Float

                    min_bias: The float value that the lowest quantized bias value represents.

                    -> Tensor v'6 Float

                    max_bias: The float value that the highest quantized bias value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, min_out, max_out)

                    • output
                    • min_out: The float value that the lowest quantized output value represents.
                    • max_out: The float value that the highest quantized output value represents.

                    quantizedConcat Source #

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 Int32

                    concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

                    -> [Tensor v'2 t]

                    values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

                    -> [Tensor v'3 Float]

                    input_mins: The minimum scalar values for each of the input tensors.

                    -> [Tensor v'4 Float]

                    input_maxes: The maximum scalar values for each of the input tensors.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.
                    • output_min: The float value that the minimum quantized output value represents.
                    • output_max: The float value that the maximum quantized output value represents.

                    Concatenates quantized tensors along one dimension.

                    quantizedConcat'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 Int32

                    concat_dim: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

                    -> [Tensor v'2 t]

                    values: The N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

                    -> [Tensor v'3 Float]

                    input_mins: The minimum scalar values for each of the input tensors.

                    -> [Tensor v'4 Float]

                    input_maxes: The maximum scalar values for each of the input tensors.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output: A Tensor with the concatenation of values stacked along the + in concat_dim where it has the sum of the sizes.
                    • output_min: The float value that the minimum quantized output value represents.
                    • output_max: The float value that the maximum quantized output value represents.

                    Concatenates quantized tensors along one dimension.

                    quantizedConcat' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 Int32

                    concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

                    -> [Tensor v'2 t]

                    values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

                    -> [Tensor v'3 Float]

                    input_mins: The minimum scalar values for each of the input tensors.

                    -> [Tensor v'4 Float]

                    input_maxes: The maximum scalar values for each of the input tensors.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.
                    • output_min: The float value that the minimum quantized output value represents.
                    • output_max: The float value that the maximum quantized output value represents.

                    quantizedConv2D

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` tfilter, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => Tensor v'1 tinput

                    input

                    -> Tensor v'2 tfilter

                    filter: filter's input_depth dimension must match input's depth dimensions.

                    -> Tensor v'3 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'4 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> Tensor v'5 Float

                    min_filter: The float value that the lowest quantized filter value represents.

                    -> Tensor v'6 Float

                    max_filter: The float value that the highest quantized filter value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    Computes a 2D convolution given quantized 4D input and filter tensors.

                    The inputs are quantized tensors where the lowest value represents the real + in concat_dim where it has the sum of the sizes.

                  • output_min: The float value that the minimum quantized output value represents.
                  • output_max: The float value that the maximum quantized output value represents.
                  • quantizedConv2D Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => Tensor v'1 tinput

                    input

                    -> Tensor v'2 tfilter

                    filter: filter's input_depth dimension must match input's depth dimensions.

                    -> Tensor v'3 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'4 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> Tensor v'5 Float

                    min_filter: The float value that the lowest quantized filter value represents.

                    -> Tensor v'6 Float

                    max_filter: The float value that the highest quantized filter value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    Computes a 2D convolution given quantized 4D input and filter tensors.

                    The inputs are quantized tensors where the lowest value represents the real number of the associated minimum, and the highest represents the maximum. This means that you can only interpret the quantized output in the same way, by taking the returned minimum and maximum values into account.

                    quantizedConv2D'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` tfilter, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    input

                    -> Tensor v'2 tfilter

                    filter: filter's input_depth dimension must match input's depth dimensions.

                    -> Tensor v'3 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'4 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> Tensor v'5 Float

                    min_filter: The float value that the lowest quantized filter value represents.

                    -> Tensor v'6 Float

                    max_filter: The float value that the highest quantized filter value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    quantizedInstanceNorm

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` t 
                    => Tensor v'1 t

                    x: A 4D input Tensor.

                    -> Tensor v'2 Float

                    x_min: The value represented by the lowest quantized input.

                    -> Tensor v'3 Float

                    x_max: The value represented by the highest quantized input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (y, y_min, y_max)

                    • y: A 4D Tensor.
                    • y_min: The value represented by the lowest quantized output.
                    • y_max: The value represented by the highest quantized output.

                    Quantized Instance normalization.

                    quantizedInstanceNorm'

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x: A 4D input Tensor.

                    -> Tensor v'2 Float

                    x_min: The value represented by the lowest quantized input.

                    -> Tensor v'3 Float

                    x_max: The value represented by the highest quantized input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (y, y_min, y_max)

                    • y: A 4D Tensor.
                    • y_min: The value represented by the lowest quantized output.
                    • y_max: The value represented by the highest quantized output.

                    quantizedMatMul

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` toutput) 
                    => Tensor v'1 t1

                    a: Must be a two-dimensional tensor.

                    -> Tensor v'2 t2

                    b: Must be a two-dimensional tensor.

                    -> Tensor v'3 Float

                    min_a: The float value that the lowest quantized a value represents.

                    -> Tensor v'4 Float

                    max_a: The float value that the highest quantized a value represents.

                    -> Tensor v'5 Float

                    min_b: The float value that the lowest quantized b value represents.

                    -> Tensor v'6 Float

                    max_b: The float value that the highest quantized b value represents.

                    -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

                    (out, min_out, max_out)

                    • out
                    • min_out: The float value that the lowest quantized output value represents.
                    • max_out: The float value that the highest quantized output value represents.

                    Perform a quantized matrix multiplication of a by the matrix b.

                    The inputs must be two-dimensional matrices and the inner dimension of a (after being transposed if transpose_a is non-zero) must match the outer dimension of b (after being transposed if transposed_b is non-zero).

                    quantizedConv2D' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    input

                    -> Tensor v'2 tfilter

                    filter: filter's input_depth dimension must match input's depth dimensions.

                    -> Tensor v'3 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'4 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> Tensor v'5 Float

                    min_filter: The float value that the lowest quantized filter value represents.

                    -> Tensor v'6 Float

                    max_filter: The float value that the highest quantized filter value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    quantizedInstanceNorm Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] t 
                    => Tensor v'1 t

                    x: A 4D input Tensor.

                    -> Tensor v'2 Float

                    x_min: The value represented by the lowest quantized input.

                    -> Tensor v'3 Float

                    x_max: The value represented by the highest quantized input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (y, y_min, y_max)

                    • y: A 4D Tensor.
                    • y_min: The value represented by the lowest quantized output.
                    • y_max: The value represented by the highest quantized output.

                    Quantized Instance normalization.

                    quantizedInstanceNorm' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] t 
                    => OpParams 
                    -> Tensor v'1 t

                    x: A 4D input Tensor.

                    -> Tensor v'2 Float

                    x_min: The value represented by the lowest quantized input.

                    -> Tensor v'3 Float

                    x_max: The value represented by the highest quantized input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (y, y_min, y_max)

                    • y: A 4D Tensor.
                    • y_min: The value represented by the lowest quantized output.
                    • y_max: The value represented by the highest quantized output.

                    quantizedMatMul Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) 
                    => Tensor v'1 t1

                    a: Must be a two-dimensional tensor.

                    -> Tensor v'2 t2

                    b: Must be a two-dimensional tensor.

                    -> Tensor v'3 Float

                    min_a: The float value that the lowest quantized a value represents.

                    -> Tensor v'4 Float

                    max_a: The float value that the highest quantized a value represents.

                    -> Tensor v'5 Float

                    min_b: The float value that the lowest quantized b value represents.

                    -> Tensor v'6 Float

                    max_b: The float value that the highest quantized b value represents.

                    -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

                    (out, min_out, max_out)

                    • out
                    • min_out: The float value that the lowest quantized output value represents.
                    • max_out: The float value that the highest quantized output value represents.

                    Perform a quantized matrix multiplication of a by the matrix b.

                    The inputs must be two-dimensional matrices and the inner dimension of a (after being transposed if transpose_a is non-zero) must match the outer dimension of b (after being transposed if transposed_b is non-zero).

                    quantizedMatMul'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` toutput) 
                    => OpParams 
                    -> Tensor v'1 t1

                    a: Must be a two-dimensional tensor.

                    -> Tensor v'2 t2

                    b: Must be a two-dimensional tensor.

                    -> Tensor v'3 Float

                    min_a: The float value that the lowest quantized a value represents.

                    -> Tensor v'4 Float

                    max_a: The float value that the highest quantized a value represents.

                    -> Tensor v'5 Float

                    min_b: The float value that the lowest quantized b value represents.

                    -> Tensor v'6 Float

                    max_b: The float value that the highest quantized b value represents.

                    -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

                    (out, min_out, max_out)

                    • out
                    • min_out: The float value that the lowest quantized output value represents.
                    • max_out: The float value that the highest quantized output value represents.

                    quantizedMaxPool

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` t 
                    => Tensor v'1 t

                    input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.

                    -> Tensor v'2 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'3 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    Produces the max pool of the input tensor for quantized types.

                    quantizedMaxPool'

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.

                    -> Tensor v'2 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'3 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    quantizedRelu

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'3 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    Computes Quantized Rectified Linear: `max(features, 0)`

                    quantizedRelu'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'3 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    quantizedRelu6

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'3 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`

                    quantizedRelu6'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'3 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    quantizedReluX

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    max_value

                    -> Tensor v'3 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'4 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`

                    quantizedReluX'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    max_value

                    -> Tensor v'3 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'4 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    quantizedReshape

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tshape) 
                    => Tensor v'1 t

                    tensor

                    -> Tensor v'2 tshape

                    shape: Defines the shape of the output tensor.

                    -> Tensor v'3 Float

                    input_min: The minimum value of the input.

                    -> Tensor v'4 Float

                    input_max: The maximum value of the input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: This value is copied from input_min.
                    • output_max: This value is copied from input_max.

                    Reshapes a quantized tensor as per the Reshape op.

                    ```

                    quantizedReshape'

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tshape) 
                    => OpParams 
                    -> Tensor v'1 t

                    tensor

                    -> Tensor v'2 tshape

                    shape: Defines the shape of the output tensor.

                    -> Tensor v'3 Float

                    input_min: The minimum value of the input.

                    -> Tensor v'4 Float

                    input_max: The maximum value of the input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: This value is copied from input_min.
                    • output_max: This value is copied from input_max.

                    queueClose

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' ControlNode 

                    Closes the given queue.

                    This operation signals that no more elements will be enqueued in the given queue.

                    quantizedMatMul' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) 
                    => OpParams 
                    -> Tensor v'1 t1

                    a: Must be a two-dimensional tensor.

                    -> Tensor v'2 t2

                    b: Must be a two-dimensional tensor.

                    -> Tensor v'3 Float

                    min_a: The float value that the lowest quantized a value represents.

                    -> Tensor v'4 Float

                    max_a: The float value that the highest quantized a value represents.

                    -> Tensor v'5 Float

                    min_b: The float value that the lowest quantized b value represents.

                    -> Tensor v'6 Float

                    max_b: The float value that the highest quantized b value represents.

                    -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

                    (out, min_out, max_out)

                    • out
                    • min_out: The float value that the lowest quantized output value represents.
                    • max_out: The float value that the highest quantized output value represents.

                    quantizedMaxPool Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] t 
                    => Tensor v'1 t

                    input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.

                    -> Tensor v'2 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'3 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    Produces the max pool of the input tensor for quantized types.

                    quantizedMaxPool' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] t 
                    => OpParams 
                    -> Tensor v'1 t

                    input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.

                    -> Tensor v'2 Float

                    min_input: The float value that the lowest quantized input value represents.

                    -> Tensor v'3 Float

                    max_input: The float value that the highest quantized input value represents.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, min_output, max_output)

                    • output
                    • min_output: The float value that the lowest quantized output value represents.
                    • max_output: The float value that the highest quantized output value represents.

                    quantizedMul Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) 
                    => Tensor v'1 t1

                    x

                    -> Tensor v'2 t2

                    y

                    -> Tensor v'3 Float

                    min_x: The float value that the lowest quantized x value represents.

                    -> Tensor v'4 Float

                    max_x: The float value that the highest quantized x value represents.

                    -> Tensor v'5 Float

                    min_y: The float value that the lowest quantized y value represents.

                    -> Tensor v'6 Float

                    max_y: The float value that the highest quantized y value represents.

                    -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

                    (z, min_z, max_z)

                    • z
                    • min_z: The float value that the lowest quantized output value represents.
                    • max_z: The float value that the highest quantized output value represents.
                    • NOTE*: QuantizedMul supports limited forms of broadcasting. More about broadcasting here

                    Returns x * y element-wise, working on quantized buffers.

                    quantizedMul' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) 
                    => OpParams 
                    -> Tensor v'1 t1

                    x

                    -> Tensor v'2 t2

                    y

                    -> Tensor v'3 Float

                    min_x: The float value that the lowest quantized x value represents.

                    -> Tensor v'4 Float

                    max_x: The float value that the highest quantized x value represents.

                    -> Tensor v'5 Float

                    min_y: The float value that the lowest quantized y value represents.

                    -> Tensor v'6 Float

                    max_y: The float value that the highest quantized y value represents.

                    -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

                    (z, min_z, max_z)

                    • z
                    • min_z: The float value that the lowest quantized output value represents.
                    • max_z: The float value that the highest quantized output value represents.
                    • NOTE*: QuantizedMul supports limited forms of broadcasting. More about broadcasting here

                    quantizedRelu Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'3 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    Computes Quantized Rectified Linear: `max(features, 0)`

                    quantizedRelu' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'3 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    quantizedRelu6 Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'3 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`

                    quantizedRelu6' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'3 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    quantizedReluX Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    max_value

                    -> Tensor v'3 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'4 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`

                    quantizedReluX' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    features

                    -> Tensor v'2 Float

                    max_value

                    -> Tensor v'3 Float

                    min_features: The float value that the lowest quantized value represents.

                    -> Tensor v'4 Float

                    max_features: The float value that the highest quantized value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (activations, min_activations, max_activations)

                    • activations: Has the same output shape as "features".
                    • min_activations: The float value that the lowest quantized value represents.
                    • max_activations: The float value that the highest quantized value represents.

                    quantizedReshape Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tshape) 
                    => Tensor v'1 t

                    tensor

                    -> Tensor v'2 tshape

                    shape: Defines the shape of the output tensor.

                    -> Tensor v'3 Float

                    input_min: The minimum value of the input.

                    -> Tensor v'4 Float

                    input_max: The maximum value of the input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: This value is copied from input_min.
                    • output_max: This value is copied from input_max.

                    Reshapes a quantized tensor as per the Reshape op.

                    ```

                    quantizedReshape' Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tshape) 
                    => OpParams 
                    -> Tensor v'1 t

                    tensor

                    -> Tensor v'2 tshape

                    shape: Defines the shape of the output tensor.

                    -> Tensor v'3 Float

                    input_min: The minimum value of the input.

                    -> Tensor v'4 Float

                    input_max: The maximum value of the input.

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: This value is copied from input_min.
                    • output_max: This value is copied from input_max.

                    quantizedResizeBilinear Source #

                    Arguments

                    :: OneOf '[Int32, Word8, Float] t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

                    -> Tensor v'3 Float

                    min

                    -> Tensor v'4 Float

                    max

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (resized_images, out_min, out_max)

                    • resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.
                    • out_min
                    • out_max

                    Resize quantized images to size using quantized bilinear interpolation.

                    Input images and output images must be quantized types.

                    quantizedResizeBilinear' Source #

                    Arguments

                    :: OneOf '[Int32, Word8, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

                    -> Tensor v'3 Float

                    min

                    -> Tensor v'4 Float

                    max

                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)

                    (resized_images, out_min, out_max)

                    • resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.
                    • out_min
                    • out_max

                    queueClose Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' ControlNode 

                    Closes the given queue.

                    This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.

                    queueClose'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' ControlNode 

                    queueCloseV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    handle: The handle to a queue.

                    -> m' ControlNode 

                    Closes the given queue.

                    This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.

                    queueClose' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' ControlNode 

                    queueCloseV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> m' ControlNode 

                    Closes the given queue.

                    This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.

                    queueCloseV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    handle: The handle to a queue.

                    -> m' ControlNode 

                    queueDequeue

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues a tuple of one or more tensors from the given queue.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    queueCloseV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> m' ControlNode 

                    queueDequeue Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues a tuple of one or more tensors from the given queue.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    N.B. If the queue is empty, this operation will block until an element has been dequeued (or timeout_ms elapses, if specified).

                    queueDequeue'

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueMany

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues n tuples of one or more tensors from the given queue.

                    If the queue is closed and there are fewer than n elements, then an OutOfRange error is returned.

                    queueDequeue' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueMany Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues n tuples of one or more tensors from the given queue.

                    If the queue is closed and there are fewer than n elements, then an OutOfRange error is returned.

                    This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    N.B. If the queue is empty, this operation will block until n elements have been dequeued (or timeout_ms elapses, if specified).

                    queueDequeueMany'

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueManyV2

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => ResourceHandle

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues n tuples of one or more tensors from the given queue.

                    If the queue is closed and there are fewer than n elements, then an OutOfRange error is returned.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    N.B. If the queue is empty, this operation will block until n elements have been dequeued (or timeout_ms elapses, if specified).

                    queueDequeueMany' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueManyV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues n tuples of one or more tensors from the given queue.

                    If the queue is closed and there are fewer than n elements, then an OutOfRange error is returned.

                    This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    N.B. If the queue is empty, this operation will block until n elements have been dequeued (or timeout_ms elapses, if specified).

                    queueDequeueManyV2'

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> ResourceHandle

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueUpTo

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues n tuples of one or more tensors from the given queue.

                    This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.

                    If the queue is closed and there are more than 0 but less than n elements remaining, then instead of returning an OutOfRange error like QueueDequeueMany, less than n elements are returned immediately. If the queue is closed and there are 0 elements left in the queue, then an OutOfRange error is returned just like in QueueDequeueMany. Otherwise the behavior is identical to QueueDequeueMany:

                    This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    N.B. If the queue is empty, this operation will block until n elements have been dequeued (or timeout_ms elapses, if specified).

                    queueDequeueManyV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueUpTo Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues n tuples of one or more tensors from the given queue.

                    This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.

                    If the queue is closed and there are more than 0 but less than n elements remaining, then instead of returning an OutOfRange error like QueueDequeueMany, less than n elements are returned immediately. If the queue is closed and there are 0 elements left in the queue, then an OutOfRange error is returned just like in QueueDequeueMany. Otherwise the behavior is identical to QueueDequeueMany:

                    This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    queueDequeueUpTo'

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueUpToV2

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => ResourceHandle

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues n tuples of one or more tensors from the given queue.

                    This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.

                    If the queue is closed and there are more than 0 but less than n elements remaining, then instead of returning an OutOfRange error like QueueDequeueMany, less than n elements are returned immediately. If the queue is closed and there are 0 elements left in the queue, then an OutOfRange error is returned just like in QueueDequeueMany. Otherwise the behavior is identical to QueueDequeueMany:

                    This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    queueDequeueUpTo' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueUpToV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues n tuples of one or more tensors from the given queue.

                    This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.

                    If the queue is closed and there are more than 0 but less than n elements remaining, then instead of returning an OutOfRange error like QueueDequeueMany, less than n elements are returned immediately. If the queue is closed and there are 0 elements left in the queue, then an OutOfRange error is returned just like in QueueDequeueMany. Otherwise the behavior is identical to QueueDequeueMany:

                    This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    queueDequeueUpToV2'

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> ResourceHandle

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueV2

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => ResourceHandle

                    handle: The handle to a queue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues a tuple of one or more tensors from the given queue.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    queueDequeueUpToV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> Tensor v'2 Int32

                    n: The number of tuples to dequeue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueDequeueV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    Dequeues a tuple of one or more tensors from the given queue.

                    This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

                    N.B. If the queue is empty, this operation will block until an element has been dequeued (or timeout_ms elapses, if specified).

                    queueDequeueV2'

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> ResourceHandle

                    handle: The handle to a queue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueEnqueue

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    Enqueues a tuple of one or more tensors in the given queue.

                    The components input has k elements, which correspond to the components of tuples stored in the given queue.

                    queueDequeueV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes component_types) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> m' (TensorList Value component_types)

                    components: One or more tensors that were dequeued as a tuple.

                    queueEnqueue Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    Enqueues a tuple of one or more tensors in the given queue.

                    The components input has k elements, which correspond to the components of tuples stored in the given queue.

                    N.B. If the queue is full, this operation will block until the given element has been enqueued (or timeout_ms elapses, if specified).

                    queueEnqueue'

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    queueEnqueueMany

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    Enqueues zero or more tuples of one or more tensors in the given queue.

                    This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tuple components must have the same size in the 0th dimension.

                    queueEnqueue' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    queueEnqueueMany Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    Enqueues zero or more tuples of one or more tensors in the given queue.

                    This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tuple components must have the same size in the 0th dimension.

                    The components input has k elements, which correspond to the components of tuples stored in the given queue.

                    N.B. If the queue is full, this operation will block until the given - elements have been enqueued (or timeout_ms elapses, if specified).

                    queueEnqueueMany'

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should - be taken.

                    -> m' ControlNode 

                    queueEnqueueManyV2

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => ResourceHandle

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should - be taken.

                    -> m' ControlNode 

                    Enqueues zero or more tuples of one or more tensors in the given queue.

                    This operation slices each component tensor along the 0th dimension to + elements have been enqueued (or timeout_ms elapses, if specified).

                    queueEnqueueMany' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should + be taken.

                    -> m' ControlNode 

                    queueEnqueueManyV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should + be taken.

                    -> m' ControlNode 

                    Enqueues zero or more tuples of one or more tensors in the given queue.

                    This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tuple components must have the same size in the 0th dimension.

                    The components input has k elements, which correspond to the components of tuples stored in the given queue.

                    N.B. If the queue is full, this operation will block until the given - elements have been enqueued (or timeout_ms elapses, if specified).

                    queueEnqueueManyV2'

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => OpParams 
                    -> ResourceHandle

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should - be taken.

                    -> m' ControlNode 

                    queueEnqueueV2

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => ResourceHandle

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    Enqueues a tuple of one or more tensors in the given queue.

                    The components input has k elements, which correspond to the components of + elements have been enqueued (or timeout_ms elapses, if specified).

                    queueEnqueueManyV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should + be taken.

                    -> m' ControlNode 

                    queueEnqueueV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    Enqueues a tuple of one or more tensors in the given queue.

                    The components input has k elements, which correspond to the components of tuples stored in the given queue.

                    N.B. If the queue is full, this operation will block until the given - element has been enqueued (or timeout_ms elapses, if specified).

                    queueEnqueueV2'

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => OpParams 
                    -> ResourceHandle

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    queueSize

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (Tensor Value Int32)

                    size: The number of elements in the given queue.

                    Computes the number of elements in the given queue.

                    queueSize'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (Tensor Value Int32)

                    size: The number of elements in the given queue.

                    queueSizeV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    handle: The handle to a queue.

                    -> m' (Tensor Value Int32)

                    size: The number of elements in the given queue.

                    Computes the number of elements in the given queue.

                    queueSizeV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    handle: The handle to a queue.

                    -> m' (Tensor Value Int32)

                    size: The number of elements in the given queue.

                    rGBToHSV

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => Tensor v'1 t

                    images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.

                    -> Tensor Build t

                    output: images converted to HSV.

                    Converts one or more images from RGB to HSV.

                    Outputs a tensor of the same shape as the images tensor, containing the HSV + element has been enqueued (or timeout_ms elapses, if specified).

                    queueEnqueueV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes tcomponents) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> TensorList v'2 tcomponents

                    components: One or more tensors from which the enqueued tensors should be taken.

                    -> m' ControlNode 

                    queueIsClosed Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (Tensor Value Bool)

                    is_closed

                    Returns true if queue is closed.

                    This operation returns true if the queue is closed and false if the queue + is open.

                    queueIsClosed' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (Tensor Value Bool)

                    is_closed

                    queueIsClosedV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> m' (Tensor Value Bool)

                    is_closed

                    Returns true if queue is closed.

                    This operation returns true if the queue is closed and false if the queue + is open.

                    queueIsClosedV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> m' (Tensor Value Bool)

                    is_closed

                    queueSize Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (Tensor Value Int32)

                    size: The number of elements in the given queue.

                    Computes the number of elements in the given queue.

                    queueSize' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    handle: The handle to a queue.

                    -> m' (Tensor Value Int32)

                    size: The number of elements in the given queue.

                    queueSizeV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> m' (Tensor Value Int32)

                    size: The number of elements in the given queue.

                    Computes the number of elements in the given queue.

                    queueSizeV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    handle: The handle to a queue.

                    -> m' (Tensor Value Int32)

                    size: The number of elements in the given queue.

                    rFFT Source #

                    Arguments

                    :: Tensor v'1 Float

                    input: A float32 tensor.

                    -> Tensor v'2 Int32

                    fft_length: An int32 tensor of shape [1]. The FFT length.

                    -> Tensor Build (Complex Float)

                    output: A complex64 tensor of the same rank as input. The inner-most + dimension of input is replaced with the `fft_length / 2 + 1` unique + frequency components of its 1D Fourier transform.

                    compatibility(numpy) + Equivalent to np.fft.rfft + end_compatibility

                    Real-valued fast Fourier transform.

                    Computes the 1-dimensional discrete Fourier transform of a real-valued signal + over the inner-most dimension of input.

                    Since the DFT of a real signal is Hermitian-symmetric, RFFT only returns the + `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, + followed by the `fft_length / 2` positive-frequency terms.

                    Along the axis RFFT is computed on, if fft_length is smaller than the + corresponding dimension of input, the dimension is cropped. If it is larger, + the dimension is padded with zeros.

                    rFFT' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Float

                    input: A float32 tensor.

                    -> Tensor v'2 Int32

                    fft_length: An int32 tensor of shape [1]. The FFT length.

                    -> Tensor Build (Complex Float)

                    output: A complex64 tensor of the same rank as input. The inner-most + dimension of input is replaced with the `fft_length / 2 + 1` unique + frequency components of its 1D Fourier transform.

                    compatibility(numpy) + Equivalent to np.fft.rfft + end_compatibility

                    rFFT2D Source #

                    Arguments

                    :: Tensor v'1 Float

                    input: A float32 tensor.

                    -> Tensor v'2 Int32

                    fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.

                    -> Tensor Build (Complex Float)

                    output: A complex64 tensor of the same rank as input. The inner-most 2 + dimensions of input are replaced with their 2D Fourier transform. The + inner-most dimension contains `fft_length / 2 + 1` unique frequency + components.

                    compatibility(numpy) + Equivalent to np.fft.rfft2 + end_compatibility

                    2D real-valued fast Fourier transform.

                    Computes the 2-dimensional discrete Fourier transform of a real-valued signal + over the inner-most 2 dimensions of input.

                    Since the DFT of a real signal is Hermitian-symmetric, RFFT2D only returns the + `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + of output: the zero-frequency term, followed by the `fft_length / 2` + positive-frequency terms.

                    Along each axis RFFT2D is computed on, if fft_length is smaller than the + corresponding dimension of input, the dimension is cropped. If it is larger, + the dimension is padded with zeros.

                    rFFT2D' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Float

                    input: A float32 tensor.

                    -> Tensor v'2 Int32

                    fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.

                    -> Tensor Build (Complex Float)

                    output: A complex64 tensor of the same rank as input. The inner-most 2 + dimensions of input are replaced with their 2D Fourier transform. The + inner-most dimension contains `fft_length / 2 + 1` unique frequency + components.

                    compatibility(numpy) + Equivalent to np.fft.rfft2 + end_compatibility

                    rFFT3D Source #

                    Arguments

                    :: Tensor v'1 Float

                    input: A float32 tensor.

                    -> Tensor v'2 Int32

                    fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.

                    -> Tensor Build (Complex Float)

                    output: A complex64 tensor of the same rank as input. The inner-most 3 + dimensions of input are replaced with the their 3D Fourier transform. The + inner-most dimension contains `fft_length / 2 + 1` unique frequency + components.

                    compatibility(numpy) + Equivalent to np.fft.rfftn with 3 dimensions. + end_compatibility

                    3D real-valued fast Fourier transform.

                    Computes the 3-dimensional discrete Fourier transform of a real-valued signal + over the inner-most 3 dimensions of input.

                    Since the DFT of a real signal is Hermitian-symmetric, RFFT3D only returns the + `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + of output: the zero-frequency term, followed by the `fft_length / 2` + positive-frequency terms.

                    Along each axis RFFT3D is computed on, if fft_length is smaller than the + corresponding dimension of input, the dimension is cropped. If it is larger, + the dimension is padded with zeros.

                    rFFT3D' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 Float

                    input: A float32 tensor.

                    -> Tensor v'2 Int32

                    fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.

                    -> Tensor Build (Complex Float)

                    output: A complex64 tensor of the same rank as input. The inner-most 3 + dimensions of input are replaced with the their 3D Fourier transform. The + inner-most dimension contains `fft_length / 2 + 1` unique frequency + components.

                    compatibility(numpy) + Equivalent to np.fft.rfftn with 3 dimensions. + end_compatibility

                    rGBToHSV Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => Tensor v'1 t

                    images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.

                    -> Tensor Build t

                    output: images converted to HSV.

                    Converts one or more images from RGB to HSV.

                    Outputs a tensor of the same shape as the images tensor, containing the HSV value of the pixels. The output is only well defined if the value in images are in `[0,1]`.

                    `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 - corresponds to pure red, hue 13 is pure green, and 23 is pure blue.

                    rGBToHSV'

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.

                    -> Tensor Build t

                    output: images converted to HSV.

                    randomCrop

                    Arguments

                    :: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) 
                    => Tensor v'1 t

                    image: 3-D of shape `[height, width, channels]`.

                    -> Tensor v'2 Int64

                    size: 1-D of length 2 containing: crop_height, crop_width..

                    -> m' (Tensor Value t)

                    output: 3-D of shape `[crop_height, crop_width, channels].`

                    Randomly crop image.

                    size is a 1-D int64 tensor with 2 elements representing the crop height and + corresponds to pure red, hue 13 is pure green, and 23 is pure blue.

                    rGBToHSV' Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.

                    -> Tensor Build t

                    output: images converted to HSV.

                    randomCrop Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) 
                    => Tensor v'1 t

                    image: 3-D of shape `[height, width, channels]`.

                    -> Tensor v'2 Int64

                    size: 1-D of length 2 containing: crop_height, crop_width..

                    -> m' (Tensor Value t)

                    output: 3-D of shape `[crop_height, crop_width, channels].`

                    Randomly crop image.

                    size is a 1-D int64 tensor with 2 elements representing the crop height and width. The values must be non negative.

                    This Op picks a random location in image and crops a height by width rectangle from that location. The random location is picked so the cropped - area will fit inside the original image.

                    randomCrop'

                    Arguments

                    :: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    image: 3-D of shape `[height, width, channels]`.

                    -> Tensor v'2 Int64

                    size: 1-D of length 2 containing: crop_height, crop_width..

                    -> m' (Tensor Value t)

                    output: 3-D of shape `[crop_height, crop_width, channels].`

                    randomGamma

                    Arguments

                    :: (MonadBuild m', OneOf `[Int32, Int64]` s, OneOf `[Word16, Double, Float]` t) 
                    => Tensor v'1 s

                    shape: 1-D integer tensor. Shape of independent samples to draw from each - distribution described by the shape parameters given in alpha.

                    -> Tensor v'2 t

                    alpha: A tensor in which each scalar is a "shape" parameter describing the - associated gamma distribution.

                    -> m' (Tensor Value t)

                    output: A tensor with shape `shape + shape(alpha)`. Each slice + area will fit inside the original image.

                    randomCrop' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    image: 3-D of shape `[height, width, channels]`.

                    -> Tensor v'2 Int64

                    size: 1-D of length 2 containing: crop_height, crop_width..

                    -> m' (Tensor Value t)

                    output: 3-D of shape `[crop_height, crop_width, channels].`

                    randomGamma Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) 
                    => Tensor v'1 s

                    shape: 1-D integer tensor. Shape of independent samples to draw from each + distribution described by the shape parameters given in alpha.

                    -> Tensor v'2 t

                    alpha: A tensor in which each scalar is a "shape" parameter describing the + associated gamma distribution.

                    -> m' (Tensor Value t)

                    output: A tensor with shape `shape + shape(alpha)`. Each slice `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.

                    Outputs random values from the Gamma distribution(s) described by alpha.

                    This op uses the algorithm by Marsaglia et al. to acquire samples via transformation-rejection from pairs of uniform and normal random variables. - See http://dl.acm.org/citation.cfm?id=358414

                    randomGamma'

                    Arguments

                    :: (MonadBuild m', OneOf `[Int32, Int64]` s, OneOf `[Word16, Double, Float]` t) 
                    => OpParams 
                    -> Tensor v'1 s

                    shape: 1-D integer tensor. Shape of independent samples to draw from each - distribution described by the shape parameters given in alpha.

                    -> Tensor v'2 t

                    alpha: A tensor in which each scalar is a "shape" parameter describing the - associated gamma distribution.

                    -> m' (Tensor Value t)

                    output: A tensor with shape `shape + shape(alpha)`. Each slice + See http://dl.acm.org/citation.cfm?id=358414

                    randomGamma' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 s

                    shape: 1-D integer tensor. Shape of independent samples to draw from each + distribution described by the shape parameters given in alpha.

                    -> Tensor v'2 t

                    alpha: A tensor in which each scalar is a "shape" parameter describing the + associated gamma distribution.

                    -> m' (Tensor Value t)

                    output: A tensor with shape `shape + shape(alpha)`. Each slice `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for - `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.

                    randomShuffle

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor v'1 t

                    value: The tensor to be shuffled.

                    -> m' (Tensor Value t)

                    output: A tensor of same shape and type as value, shuffled along its first + `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.

                    randomPoisson Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] dtype) 
                    => Tensor v'1 s

                    shape: 1-D integer tensor. Shape of independent samples to draw from each + distribution described by the shape parameters given in rate.

                    -> Tensor v'2 dtype

                    rate: A tensor in which each scalar is a "rate" parameter describing the + associated poisson distribution.

                    -> m' (Tensor Value dtype)

                    output: A tensor with shape `shape + shape(rate)`. Each slice + `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for + `rate[i0, i1, ...iN]`. The dtype of the output matches the dtype of + rate.

                    Outputs random values from the Poisson distribution(s) described by rate.

                    This op uses two algorithms, depending on rate. If rate >= 10, then + the algorithm by Hormann is used to acquire samples via + transformation-rejection. + See http://www.sciencedirect.com/science/article/pii/0167668793909974.

                    Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + random variables. + See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + Programming, Volume 2. Addison Wesley

                    randomPoisson' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] dtype) 
                    => OpParams 
                    -> Tensor v'1 s

                    shape: 1-D integer tensor. Shape of independent samples to draw from each + distribution described by the shape parameters given in rate.

                    -> Tensor v'2 dtype

                    rate: A tensor in which each scalar is a "rate" parameter describing the + associated poisson distribution.

                    -> m' (Tensor Value dtype)

                    output: A tensor with shape `shape + shape(rate)`. Each slice + `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for + `rate[i0, i1, ...iN]`. The dtype of the output matches the dtype of + rate.

                    randomShuffle Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor v'1 t

                    value: The tensor to be shuffled.

                    -> m' (Tensor Value t)

                    output: A tensor of same shape and type as value, shuffled along its first dimension.

                    Randomly shuffles a tensor along its first dimension.

                    The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a - 3x2 tensor is:

                    ```prettyprint + 3x2 tensor is:

                    ``` [[1, 2], [[5, 6], [3, 4], ==> [1, 2], [5, 6]] [3, 4]] - ```

                    randomShuffle'

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor v'1 t

                    value: The tensor to be shuffled.

                    -> m' (Tensor Value t)

                    output: A tensor of same shape and type as value, shuffled along its first - dimension.

                    randomShuffleQueue

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    A queue that randomizes the order of elements.

                    randomShuffleQueue'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    randomShuffleQueueV2

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    component_types: The type of each component in a value.

                    -> m' ResourceHandle

                    handle: The handle to the queue.

                    A queue that randomizes the order of elements.

                    randomShuffleQueueV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    component_types: The type of each component in a value.

                    -> m' ResourceHandle

                    handle: The handle to the queue.

                    randomStandardNormal

                    Arguments

                    :: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
                    => Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> m' (Tensor Value dtype)

                    output: A tensor of the specified shape filled with random normal values.

                    Outputs random values from a normal distribution.

                    The generated values will have mean 0 and standard deviation 1.

                    randomStandardNormal'

                    Arguments

                    :: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> m' (Tensor Value dtype)

                    output: A tensor of the specified shape filled with random normal values.

                    randomUniform

                    Arguments

                    :: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
                    => Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> m' (Tensor Value dtype)

                    output: A tensor of the specified shape filled with uniform random values.

                    Outputs random values from a uniform distribution.

                    The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.

                    randomUniform'

                    Arguments

                    :: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> m' (Tensor Value dtype)

                    output: A tensor of the specified shape filled with uniform random values.

                    randomUniformInt

                    Arguments

                    :: (MonadBuild m', OneOf `[Int32, Int64]` tout, OneOf `[Int32, Int64]` t) 
                    => Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> Tensor v'2 tout

                    minval: 0-D. Inclusive lower bound on the generated integers.

                    -> Tensor v'3 tout

                    maxval: 0-D. Exclusive upper bound on the generated integers.

                    -> m' (Tensor Value tout)

                    output: A tensor of the specified shape filled with uniform random integers.

                    Outputs random integers from a uniform distribution.

                    The generated values are uniform integers in the range `[minval, maxval)`. The lower bound minval is included in the range, while the upper bound maxval is excluded.

                    randomShuffle' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor v'1 t

                    value: The tensor to be shuffled.

                    -> m' (Tensor Value t)

                    output: A tensor of same shape and type as value, shuffled along its first dimension.

                    randomShuffleQueue Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    A queue that randomizes the order of elements.

                    randomShuffleQueue' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Ref ByteString)

                    handle: The handle to the queue.

                    randomShuffleQueueV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Value ResourceHandle)

                    handle: The handle to the queue.

                    A queue that randomizes the order of elements.

                    randomShuffleQueueV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    component_types: The type of each component in a value.

                    -> m' (Tensor Value ResourceHandle)

                    handle: The handle to the queue.

                    randomStandardNormal Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                    => Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> m' (Tensor Value dtype)

                    output: A tensor of the specified shape filled with random normal values.

                    Outputs random values from a normal distribution.

                    The generated values will have mean 0 and standard deviation 1.

                    randomStandardNormal' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> m' (Tensor Value dtype)

                    output: A tensor of the specified shape filled with random normal values.

                    randomUniform Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                    => Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> m' (Tensor Value dtype)

                    output: A tensor of the specified shape filled with uniform random values.

                    Outputs random values from a uniform distribution.

                    The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.

                    randomUniform' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> m' (Tensor Value dtype)

                    output: A tensor of the specified shape filled with uniform random values.

                    randomUniformInt Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) 
                    => Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> Tensor v'2 tout

                    minval: 0-D. Inclusive lower bound on the generated integers.

                    -> Tensor v'3 tout

                    maxval: 0-D. Exclusive upper bound on the generated integers.

                    -> m' (Tensor Value tout)

                    output: A tensor of the specified shape filled with uniform random integers.

                    Outputs random integers from a uniform distribution.

                    The generated values are uniform integers in the range `[minval, maxval)`. The lower bound minval is included in the range, while the upper bound maxval is excluded.

                    The random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2^32` or `2^64`).

                    randomUniformInt'

                    Arguments

                    :: (MonadBuild m', OneOf `[Int32, Int64]` tout, OneOf `[Int32, Int64]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> Tensor v'2 tout

                    minval: 0-D. Inclusive lower bound on the generated integers.

                    -> Tensor v'3 tout

                    maxval: 0-D. Exclusive upper bound on the generated integers.

                    -> m' (Tensor Value tout)

                    output: A tensor of the specified shape filled with uniform random integers.

                    range

                    Arguments

                    :: OneOf `[Int32, Int64, Double, Float]` tidx 
                    => Tensor v'1 tidx

                    start: 0-D (scalar). First entry in the sequence.

                    -> Tensor v'2 tidx

                    limit: 0-D (scalar). Upper limit of sequence, exclusive.

                    -> Tensor v'3 tidx

                    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

                    -> Tensor Build tidx

                    output: 1-D.

                    Creates a sequence of numbers.

                    This operation creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit.

                    randomUniformInt' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    shape: The shape of the output tensor.

                    -> Tensor v'2 tout

                    minval: 0-D. Inclusive lower bound on the generated integers.

                    -> Tensor v'3 tout

                    maxval: 0-D. Exclusive upper bound on the generated integers.

                    -> m' (Tensor Value tout)

                    output: A tensor of the specified shape filled with uniform random integers.

                    range Source #

                    Arguments

                    :: OneOf '[Int32, Int64, Double, Float] tidx 
                    => Tensor v'1 tidx

                    start: 0-D (scalar). First entry in the sequence.

                    -> Tensor v'2 tidx

                    limit: 0-D (scalar). Upper limit of sequence, exclusive.

                    -> Tensor v'3 tidx

                    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

                    -> Tensor Build tidx

                    output: 1-D.

                    Creates a sequence of numbers.

                    This operation creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit.

                    For example:

                    ``` # start is 3 # limit is 18 # delta is 3 tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ```

                    range'

                    Arguments

                    :: OneOf `[Int32, Int64, Double, Float]` tidx 
                    => OpParams 
                    -> Tensor v'1 tidx

                    start: 0-D (scalar). First entry in the sequence.

                    -> Tensor v'2 tidx

                    limit: 0-D (scalar). Upper limit of sequence, exclusive.

                    -> Tensor v'3 tidx

                    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

                    -> Tensor Build tidx

                    output: 1-D.

                    rank

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input

                    -> Tensor Build Int32

                    output

                    Returns the rank of a tensor.

                    This operation returns an integer representing the rank of input.

                    For example:

                    ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] # shape of tensor t is [2, 2, 3] rank(t) ==> 3 ```

                    range' Source #

                    Arguments

                    :: OneOf '[Int32, Int64, Double, Float] tidx 
                    => OpParams 
                    -> Tensor v'1 tidx

                    start: 0-D (scalar). First entry in the sequence.

                    -> Tensor v'2 tidx

                    limit: 0-D (scalar). Upper limit of sequence, exclusive.

                    -> Tensor v'3 tidx

                    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

                    -> Tensor Build tidx

                    output: 1-D.

                    rangeDataset Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    output_types

                    -> Tensor v'1 Int64

                    start: corresponds to start in python's xrange().

                    -> Tensor v'2 Int64

                    stop: corresponds to stop in python's xrange().

                    -> Tensor v'3 Int64

                    step: corresponds to step in python's xrange().

                    -> m' (Tensor Value ResourceHandle)

                    handle

                    Creates a dataset with a range of values. Corresponds to python's xrange.

                    rangeDataset' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    output_types

                    -> Tensor v'1 Int64

                    start: corresponds to start in python's xrange().

                    -> Tensor v'2 Int64

                    stop: corresponds to stop in python's xrange().

                    -> Tensor v'3 Int64

                    step: corresponds to step in python's xrange().

                    -> m' (Tensor Value ResourceHandle)

                    handle

                    rank Source #

                    Arguments

                    :: TensorType t 
                    => Tensor v'1 t

                    input

                    -> Tensor Build Int32

                    output

                    Returns the rank of a tensor.

                    This operation returns an integer representing the rank of input.

                    For example:

                    ``` # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] # shape of tensor t is [2, 2, 3] rank(t) ==> 3 ```

                    • **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as "order", "degree", or "ndims."

                    rank'

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor Build Int32

                    output

                    readFile

                    Arguments

                    :: Tensor v'1 ByteString

                    filename

                    -> Tensor Build ByteString

                    contents

                    Reads and outputs the entire contents of the input filename.

                    readFile'

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 ByteString

                    filename

                    -> Tensor Build ByteString

                    contents

                    readVariableOp

                    Arguments

                    :: (MonadBuild m', TensorType dtype) 
                    => ResourceHandle

                    resource: handle to the resource in which to store the variable.

                    -> m' (Tensor Value dtype)

                    value

                    Reads the value of a variable.

                    The tensor returned by this operation is immutable.

                    The value returned by this operation is guaranteed to be influenced by all the writes on which this operation depends directly or indirectly, and to not be influenced by any of the writes which depend directly or indirectly on this operation.

                    rank' Source #

                    Arguments

                    :: TensorType t 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor Build Int32

                    output

                    readFile Source #

                    Arguments

                    :: Tensor v'1 ByteString

                    filename

                    -> Tensor Build ByteString

                    contents

                    Reads and outputs the entire contents of the input filename.

                    readFile' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 ByteString

                    filename

                    -> Tensor Build ByteString

                    contents

                    readVariableOp Source #

                    Arguments

                    :: (MonadBuild m', TensorType dtype) 
                    => Tensor v'1 ResourceHandle

                    resource: handle to the resource in which to store the variable.

                    -> m' (Tensor Value dtype)

                    value

                    Reads the value of a variable.

                    The tensor returned by this operation is immutable.

                    The value returned by this operation is guaranteed to be influenced by all the writes on which this operation depends directly or indirectly, and to not be influenced by any of the writes which depend directly or indirectly on this operation.

                    readVariableOp'

                    Arguments

                    :: (MonadBuild m', TensorType dtype) 
                    => OpParams 
                    -> ResourceHandle

                    resource: handle to the resource in which to store the variable.

                    -> m' (Tensor Value dtype)

                    value

                    readerNumRecordsProduced

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    records_produced

                    Returns the number of records this Reader has produced.

                    This is the same as the number of ReaderRead executions that have succeeded.

                    readerNumRecordsProduced'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    records_produced

                    readerNumRecordsProducedV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    records_produced

                    Returns the number of records this Reader has produced.

                    This is the same as the number of ReaderRead executions that have succeeded.

                    readerNumRecordsProducedV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    records_produced

                    readerNumWorkUnitsCompleted

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    units_completed

                    Returns the number of work units this Reader has finished processing.

                    readerNumWorkUnitsCompleted'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    units_completed

                    readerNumWorkUnitsCompletedV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    units_completed

                    Returns the number of work units this Reader has finished processing.

                    readerNumWorkUnitsCompletedV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    units_completed

                    readerRead

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor Ref ByteString

                    queue_handle: Handle to a Queue, with string work items.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (key, value)

                    • key: A scalar.
                    • value: A scalar.

                    Returns the next record (key, value pair) produced by a Reader.

                    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).

                    readVariableOp' Source #

                    Arguments

                    :: (MonadBuild m', TensorType dtype) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    resource: handle to the resource in which to store the variable.

                    -> m' (Tensor Value dtype)

                    value

                    readerNumRecordsProduced Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    records_produced

                    Returns the number of records this Reader has produced.

                    This is the same as the number of ReaderRead executions that have succeeded.

                    readerNumRecordsProduced' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    records_produced

                    readerNumRecordsProducedV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    records_produced

                    Returns the number of records this Reader has produced.

                    This is the same as the number of ReaderRead executions that have succeeded.

                    readerNumRecordsProducedV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    records_produced

                    readerNumWorkUnitsCompleted Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    units_completed

                    Returns the number of work units this Reader has finished processing.

                    readerNumWorkUnitsCompleted' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    units_completed

                    readerNumWorkUnitsCompletedV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    units_completed

                    Returns the number of work units this Reader has finished processing.

                    readerNumWorkUnitsCompletedV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value Int64)

                    units_completed

                    readerRead Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor Ref ByteString

                    queue_handle: Handle to a Queue, with string work items.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (key, value)

                    • key: A scalar.
                    • value: A scalar.

                    Returns the next record (key, value pair) produced by a Reader.

                    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).

                    readerRead'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor Ref ByteString

                    queue_handle: Handle to a Queue, with string work items.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (key, value)

                    • key: A scalar.
                    • value: A scalar.

                    readerReadUpTo

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor Ref ByteString

                    queue_handle: Handle to a Queue, with string work items.

                    -> Tensor v'3 Int64

                    num_records: number of records to read from Reader.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (keys, values)

                    • keys: A 1-D tensor.
                    • values: A 1-D tensor.

                    Returns up to num_records (key, value) pairs produced by a Reader.

                    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).

                    readerRead' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor Ref ByteString

                    queue_handle: Handle to a Queue, with string work items.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (key, value)

                    • key: A scalar.
                    • value: A scalar.

                    readerReadUpTo Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor Ref ByteString

                    queue_handle: Handle to a Queue, with string work items.

                    -> Tensor v'3 Int64

                    num_records: number of records to read from Reader.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (keys, values)

                    • keys: A 1-D tensor.
                    • values: A 1-D tensor.

                    Returns up to num_records (key, value) pairs produced by a Reader.

                    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return less than num_records even before the last batch.

                    readerReadUpTo'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor Ref ByteString

                    queue_handle: Handle to a Queue, with string work items.

                    -> Tensor v'3 Int64

                    num_records: number of records to read from Reader.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (keys, values)

                    • keys: A 1-D tensor.
                    • values: A 1-D tensor.

                    readerReadUpToV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> ResourceHandle

                    queue_handle: Handle to a Queue, with string work items.

                    -> Tensor v'3 Int64

                    num_records: number of records to read from Reader.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (keys, values)

                    • keys: A 1-D tensor.
                    • values: A 1-D tensor.

                    Returns up to num_records (key, value) pairs produced by a Reader.

                    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return less than num_records even before the last batch.

                    readerReadUpTo' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor Ref ByteString

                    queue_handle: Handle to a Queue, with string work items.

                    -> Tensor v'3 Int64

                    num_records: number of records to read from Reader.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (keys, values)

                    • keys: A 1-D tensor.
                    • values: A 1-D tensor.

                    readerReadUpToV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ResourceHandle

                    queue_handle: Handle to a Queue, with string work items.

                    -> Tensor v'3 Int64

                    num_records: number of records to read from Reader.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (keys, values)

                    • keys: A 1-D tensor.
                    • values: A 1-D tensor.

                    Returns up to num_records (key, value) pairs produced by a Reader.

                    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return less than num_records even before the last batch.

                    readerReadUpToV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> ResourceHandle

                    queue_handle: Handle to a Queue, with string work items.

                    -> Tensor v'3 Int64

                    num_records: number of records to read from Reader.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (keys, values)

                    • keys: A 1-D tensor.
                    • values: A 1-D tensor.

                    readerReadV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> ResourceHandle

                    queue_handle: Handle to a Queue, with string work items.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (key, value)

                    • key: A scalar.
                    • value: A scalar.

                    Returns the next record (key, value pair) produced by a Reader.

                    Will dequeue from the input queue if necessary (e.g. when the + It may return less than num_records even before the last batch.

                    readerReadUpToV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ResourceHandle

                    queue_handle: Handle to a Queue, with string work items.

                    -> Tensor v'3 Int64

                    num_records: number of records to read from Reader.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (keys, values)

                    • keys: A 1-D tensor.
                    • values: A 1-D tensor.

                    readerReadV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ResourceHandle

                    queue_handle: Handle to a Queue, with string work items.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (key, value)

                    • key: A scalar.
                    • value: A scalar.

                    Returns the next record (key, value pair) produced by a Reader.

                    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished - with the previous file).

                    readerReadV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> ResourceHandle

                    queue_handle: Handle to a Queue, with string work items.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (key, value)

                    • key: A scalar.
                    • value: A scalar.

                    readerReset

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' ControlNode 

                    Restore a Reader to its initial clean state.

                    readerReset'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' ControlNode 

                    readerResetV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' ControlNode 

                    Restore a Reader to its initial clean state.

                    readerResetV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' ControlNode 

                    readerRestoreState

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ByteString

                    state: Result of a ReaderSerializeState of a Reader with type - matching reader_handle.

                    -> m' ControlNode 

                    Restore a reader to a previously saved state.

                    Not all Readers support being restored, so this can produce an - Unimplemented error.

                    readerRestoreState'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ByteString

                    state: Result of a ReaderSerializeState of a Reader with type - matching reader_handle.

                    -> m' ControlNode 

                    readerRestoreStateV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ByteString

                    state: Result of a ReaderSerializeState of a Reader with type - matching reader_handle.

                    -> m' ControlNode 

                    Restore a reader to a previously saved state.

                    Not all Readers support being restored, so this can produce an - Unimplemented error.

                    readerRestoreStateV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ByteString

                    state: Result of a ReaderSerializeState of a Reader with type - matching reader_handle.

                    -> m' ControlNode 

                    readerSerializeState

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value ByteString)

                    state

                    Produce a string tensor that encodes the state of a Reader.

                    Not all Readers support being serialized, so this can produce an - Unimplemented error.

                    readerSerializeState'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value ByteString)

                    state

                    readerSerializeStateV2

                    Arguments

                    :: MonadBuild m' 
                    => ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value ByteString)

                    state

                    Produce a string tensor that encodes the state of a Reader.

                    Not all Readers support being serialized, so this can produce an - Unimplemented error.

                    readerSerializeStateV2'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value ByteString)

                    state

                    real

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
                    => Tensor v'1 t

                    input

                    -> Tensor Build tout

                    output

                    Returns the real part of a complex number.

                    Given a tensor input of complex numbers, this operation returns a tensor of + with the previous file).

                    readerReadV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ResourceHandle

                    queue_handle: Handle to a Queue, with string work items.

                    -> m' (Tensor Value ByteString, Tensor Value ByteString)

                    (key, value)

                    • key: A scalar.
                    • value: A scalar.

                    readerReset Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' ControlNode 

                    Restore a Reader to its initial clean state.

                    readerReset' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' ControlNode 

                    readerResetV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' ControlNode 

                    Restore a Reader to its initial clean state.

                    readerResetV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' ControlNode 

                    readerRestoreState Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ByteString

                    state: Result of a ReaderSerializeState of a Reader with type + matching reader_handle.

                    -> m' ControlNode 

                    Restore a reader to a previously saved state.

                    Not all Readers support being restored, so this can produce an + Unimplemented error.

                    readerRestoreState' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ByteString

                    state: Result of a ReaderSerializeState of a Reader with type + matching reader_handle.

                    -> m' ControlNode 

                    readerRestoreStateV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ByteString

                    state: Result of a ReaderSerializeState of a Reader with type + matching reader_handle.

                    -> m' ControlNode 

                    Restore a reader to a previously saved state.

                    Not all Readers support being restored, so this can produce an + Unimplemented error.

                    readerRestoreStateV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> Tensor v'2 ByteString

                    state: Result of a ReaderSerializeState of a Reader with type + matching reader_handle.

                    -> m' ControlNode 

                    readerSerializeState Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value ByteString)

                    state

                    Produce a string tensor that encodes the state of a Reader.

                    Not all Readers support being serialized, so this can produce an + Unimplemented error.

                    readerSerializeState' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor Ref ByteString

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value ByteString)

                    state

                    readerSerializeStateV2 Source #

                    Arguments

                    :: MonadBuild m' 
                    => Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value ByteString)

                    state

                    Produce a string tensor that encodes the state of a Reader.

                    Not all Readers support being serialized, so this can produce an + Unimplemented error.

                    readerSerializeStateV2' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    reader_handle: Handle to a Reader.

                    -> m' (Tensor Value ByteString)

                    state

                    real Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) 
                    => Tensor v'1 t

                    input

                    -> Tensor Build tout

                    output

                    Returns the real part of a complex number.

                    Given a tensor input of complex numbers, this operation returns a tensor of type float that is the real part of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where *a* is the real part returned by this operation and *b* is the imaginary part.

                    For example:

                    ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] tf.real(input) ==> [-2.25, 3.25] - ```

                    real'

                    Arguments

                    :: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor Build tout

                    output

                    realDiv

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns x / y element-wise for real types.

                    If x and y are reals, this will return the floating-point division.

                    • NOTE*: Div supports broadcasting. More about broadcasting - here

                    reciprocal

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    Computes the reciprocal of x element-wise.

                    I.e., \(y = 1 / x\).

                    reciprocalGrad

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Computes the gradient for the inverse of x wrt its input.

                    Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy - is the corresponding input gradient.

                    reciprocalGrad'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    recordInput

                    Arguments

                    :: MonadBuild m' 
                    => m' (Tensor Value ByteString)

                    records: A tensor of shape [batch_size].

                    Emits randomized records.

                    recordInput'

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> m' (Tensor Value ByteString)

                    records: A tensor of shape [batch_size].

                    reduceJoin

                    Arguments

                    :: Tensor v'1 ByteString

                    inputs: The input to be joined. All reduced indices must have non-zero size.

                    -> Tensor v'2 Int32

                    reduction_indices: The dimensions to reduce over. Dimensions are reduced in the + ```

                    real' Source #

                    Arguments

                    :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) 
                    => OpParams 
                    -> Tensor v'1 t

                    input

                    -> Tensor Build tout

                    output

                    realDiv Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Returns x / y element-wise for real types.

                    If x and y are reals, this will return the floating-point division.

                    • NOTE*: Div supports broadcasting. More about broadcasting + here

                    reciprocal Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    Computes the reciprocal of x element-wise.

                    I.e., \(y = 1 / x\).

                    reciprocalGrad Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Computes the gradient for the inverse of x wrt its input.

                    Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy + is the corresponding input gradient.

                    recordInput Source #

                    Arguments

                    :: MonadBuild m' 
                    => m' (Tensor Value ByteString)

                    records: A tensor of shape [batch_size].

                    Emits randomized records.

                    recordInput' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> m' (Tensor Value ByteString)

                    records: A tensor of shape [batch_size].

                    reduceJoin Source #

                    Arguments

                    :: Tensor v'1 ByteString

                    inputs: The input to be joined. All reduced indices must have non-zero size.

                    -> Tensor v'2 Int32

                    reduction_indices: The dimensions to reduce over. Dimensions are reduced in the order specified. Omitting reduction_indices is equivalent to passing - `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.

                    -> Tensor Build ByteString

                    output: Has shape equal to that of the input with reduced dimensions removed or + `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.

                    -> Tensor Build ByteString

                    output: Has shape equal to that of the input with reduced dimensions removed or set to `1` depending on keep_dims.

                    Joins a string Tensor across the given dimensions.

                    Computes the string join across dimensions in the given string Tensor of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are - counted backwards from the end, with `-1` being equivalent to `n - 1`.

                    For example:

                    ``` + counted backwards from the end, with `-1` being equivalent to `n - 1`.

                    For example:

                    ```python # tensor a is [["a", "b"], ["c", "d"]] tf.reduce_join(a, 0) ==> ["ac", "bd"] tf.reduce_join(a, 1) ==> ["ab", "cd"] @@ -2108,30 +2456,38 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core tf.reduce_join(a, [0, 1]) ==> ["acbd"] tf.reduce_join(a, [1, 0]) ==> ["abcd"] tf.reduce_join(a, []) ==> ["abcd"] - ```

                    reduceJoin'

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 ByteString

                    inputs: The input to be joined. All reduced indices must have non-zero size.

                    -> Tensor v'2 Int32

                    reduction_indices: The dimensions to reduce over. Dimensions are reduced in the + ```

                    reduceJoin' Source #

                    Arguments

                    :: OpParams 
                    -> Tensor v'1 ByteString

                    inputs: The input to be joined. All reduced indices must have non-zero size.

                    -> Tensor v'2 Int32

                    reduction_indices: The dimensions to reduce over. Dimensions are reduced in the order specified. Omitting reduction_indices is equivalent to passing - `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.

                    -> Tensor Build ByteString

                    output: Has shape equal to that of the input with reduced dimensions removed or - set to `1` depending on keep_dims.

                    refEnter

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    data: The tensor to be made available to the child frame.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    Creates or finds a child frame, and makes `data` available to the child frame.

                    The unique frame_name is used by the Executor to identify frames. If + `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.

                    -> Tensor Build ByteString

                    output: Has shape equal to that of the input with reduced dimensions removed or + set to `1` depending on keep_dims.

                    refEnter Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    data: The tensor to be made available to the child frame.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    Creates or finds a child frame, and makes `data` available to the child frame.

                    The unique frame_name is used by the Executor to identify frames. If is_constant is true, output is a constant in the child frame; otherwise it may be changed in the child frame. At most parallel_iterations iterations - are run in parallel in the child frame.

                    refEnter'

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    data: The tensor to be made available to the child frame.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    refExit

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    data: The tensor to be made available to the parent frame.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    Exits the current frame to its parent frame.

                    Exit makes its input `data` available to the parent frame.

                    refExit'

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    data: The tensor to be made available to the parent frame.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    refIdentity

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    input

                    -> m' (Tensor Ref t)

                    output

                    Return the same ref tensor as the input ref tensor.

                    refIdentity'

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    input

                    -> m' (Tensor Ref t)

                    output

                    refMerge

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => [Tensor Ref t]

                    inputs: The input tensors, exactly one of which will become available.

                    -> m' (Tensor Ref t, Tensor Value Int32)

                    (output, value_index)

                    • output: Will be set to the available input tensor.
                    • value_index: The index of the chosen input tensor in inputs.

                    Forwards the value of an available tensor from inputs to output.

                    Merge waits for at least one of the tensors in inputs to become available. + are run in parallel in the child frame.

                    refEnter' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    data: The tensor to be made available to the child frame.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    refExit Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    data: The tensor to be made available to the parent frame.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    Exits the current frame to its parent frame.

                    Exit makes its input `data` available to the parent frame.

                    refExit' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    data: The tensor to be made available to the parent frame.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    refIdentity Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    input

                    -> m' (Tensor Ref t)

                    output

                    Return the same ref tensor as the input ref tensor.

                    refIdentity' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    input

                    -> m' (Tensor Ref t)

                    output

                    refMerge Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => [Tensor Ref t]

                    inputs: The input tensors, exactly one of which will become available.

                    -> m' (Tensor Ref t, Tensor Value Int32)

                    (output, value_index)

                    • output: Will be set to the available input tensor.
                    • value_index: The index of the chosen input tensor in inputs.

                    Forwards the value of an available tensor from inputs to output.

                    Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.

                    Merge forwards the first tensor for become available to output, and sets - value_index to its index in inputs.

                    refMerge'

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> [Tensor Ref t]

                    inputs: The input tensors, exactly one of which will become available.

                    -> m' (Tensor Ref t, Tensor Value Int32)

                    (output, value_index)

                    • output: Will be set to the available input tensor.
                    • value_index: The index of the chosen input tensor in inputs.

                    refNextIteration

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    data: The tensor to be made available to the next iteration.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    Makes its input available to the next iteration.

                    refNextIteration'

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    data: The tensor to be made available to the next iteration.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    refSelect

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor v'1 Int32

                    index: A scalar that determines the input that gets selected.

                    -> [Tensor Ref t]

                    inputs: A list of ref tensors, one of which will be forwarded to output.

                    -> m' (Tensor Ref t)

                    output: The forwarded tensor.

                    Forwards the indexth element of inputs to output.

                    refSelect'

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor v'1 Int32

                    index: A scalar that determines the input that gets selected.

                    -> [Tensor Ref t]

                    inputs: A list of ref tensors, one of which will be forwarded to output.

                    -> m' (Tensor Ref t)

                    output: The forwarded tensor.

                    refSwitch

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    data: The ref tensor to be forwarded to the appropriate output.

                    -> Tensor v'2 Bool

                    pred: A scalar that specifies which output port will receive data.

                    -> m' (Tensor Ref t, Tensor Ref t)

                    (output_false, output_true)

                    • output_false: If pred is false, data will be forwarded to this output.
                    • output_true: If pred is true, data will be forwarded to this output.

                    Forwards the ref tensor `data` to the output port determined by pred.

                    If pred is true, the `data` input is forwarded to output_true. Otherwise, - the data goes to output_false.

                    See also Switch and Merge.

                    refSwitch'

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    data: The ref tensor to be forwarded to the appropriate output.

                    -> Tensor v'2 Bool

                    pred: A scalar that specifies which output port will receive data.

                    -> m' (Tensor Ref t, Tensor Ref t)

                    (output_false, output_true)

                    • output_false: If pred is false, data will be forwarded to this output.
                    • output_true: If pred is true, data will be forwarded to this output.

                    relu

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    features

                    -> Tensor Build t

                    activations

                    Computes rectified linear: `max(features, 0)`.

                    relu'

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    features

                    -> Tensor Build t

                    activations

                    relu6

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    features

                    -> Tensor Build t

                    activations

                    Computes rectified linear 6: `min(max(features, 0), 6)`.

                    relu6'

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    features

                    -> Tensor Build t

                    activations

                    relu6Grad

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    gradients: The backpropagated gradients to the corresponding Relu6 operation.

                    -> Tensor v'2 t

                    features: The features passed as input to the corresponding Relu6 operation.

                    -> Tensor Build t

                    backprops: The gradients: - `gradients * (features > 0) * (features < 6)`.

                    Computes rectified linear 6 gradients for a Relu6 operation.

                    relu6Grad'

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    gradients: The backpropagated gradients to the corresponding Relu6 operation.

                    -> Tensor v'2 t

                    features: The features passed as input to the corresponding Relu6 operation.

                    -> Tensor Build t

                    backprops: The gradients: - `gradients * (features > 0) * (features < 6)`.

                    reluGrad

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    gradients: The backpropagated gradients to the corresponding Relu operation.

                    -> Tensor v'2 t

                    features: The features passed as input to the corresponding Relu operation, OR - the outputs of that operation (both work equivalently).

                    -> Tensor Build t

                    backprops: `gradients * (features > 0)`.

                    Computes rectified linear gradients for a Relu operation.

                    reluGrad'

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    gradients: The backpropagated gradients to the corresponding Relu operation.

                    -> Tensor v'2 t

                    features: The features passed as input to the corresponding Relu operation, OR - the outputs of that operation (both work equivalently).

                    -> Tensor Build t

                    backprops: `gradients * (features > 0)`.

                    requantizationRange

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` tinput 
                    => Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> (Tensor Build Float, Tensor Build Float)

                    (output_min, output_max)

                    • output_min: The computed min output.
                    • output_max: the computed max output.

                    Given a quantized tensor described by (input, input_min, input_max), outputs a

                    range that covers the actual values present in that tensor. This op is + value_index to its index in inputs.

                    refMerge' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> [Tensor Ref t]

                    inputs: The input tensors, exactly one of which will become available.

                    -> m' (Tensor Ref t, Tensor Value Int32)

                    (output, value_index)

                    • output: Will be set to the available input tensor.
                    • value_index: The index of the chosen input tensor in inputs.

                    refNextIteration Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    data: The tensor to be made available to the next iteration.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    Makes its input available to the next iteration.

                    refNextIteration' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    data: The tensor to be made available to the next iteration.

                    -> m' (Tensor Ref t)

                    output: The same tensor as `data`.

                    refSelect Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor v'1 Int32

                    index: A scalar that determines the input that gets selected.

                    -> [Tensor Ref t]

                    inputs: A list of ref tensors, one of which will be forwarded to output.

                    -> m' (Tensor Ref t)

                    output: The forwarded tensor.

                    Forwards the indexth element of inputs to output.

                    refSelect' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor v'1 Int32

                    index: A scalar that determines the input that gets selected.

                    -> [Tensor Ref t]

                    inputs: A list of ref tensors, one of which will be forwarded to output.

                    -> m' (Tensor Ref t)

                    output: The forwarded tensor.

                    refSwitch Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => Tensor Ref t

                    data: The ref tensor to be forwarded to the appropriate output.

                    -> Tensor v'2 Bool

                    pred: A scalar that specifies which output port will receive data.

                    -> m' (Tensor Ref t, Tensor Ref t)

                    (output_false, output_true)

                    • output_false: If pred is false, data will be forwarded to this output.
                    • output_true: If pred is true, data will be forwarded to this output.

                    Forwards the ref tensor `data` to the output port determined by pred.

                    If pred is true, the `data` input is forwarded to output_true. Otherwise, + the data goes to output_false.

                    See also Switch and Merge.

                    refSwitch' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t) 
                    => OpParams 
                    -> Tensor Ref t

                    data: The ref tensor to be forwarded to the appropriate output.

                    -> Tensor v'2 Bool

                    pred: A scalar that specifies which output port will receive data.

                    -> m' (Tensor Ref t, Tensor Ref t)

                    (output_false, output_true)

                    • output_false: If pred is false, data will be forwarded to this output.
                    • output_true: If pred is true, data will be forwarded to this output.

                    relu Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    features

                    -> Tensor Build t

                    activations

                    Computes rectified linear: `max(features, 0)`.

                    relu' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    features

                    -> Tensor Build t

                    activations

                    relu6 Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    features

                    -> Tensor Build t

                    activations

                    Computes rectified linear 6: `min(max(features, 0), 6)`.

                    relu6' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    features

                    -> Tensor Build t

                    activations

                    relu6Grad Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    gradients: The backpropagated gradients to the corresponding Relu6 operation.

                    -> Tensor v'2 t

                    features: The features passed as input to the corresponding Relu6 operation.

                    -> Tensor Build t

                    backprops: The gradients: + `gradients * (features > 0) * (features < 6)`.

                    Computes rectified linear 6 gradients for a Relu6 operation.

                    relu6Grad' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    gradients: The backpropagated gradients to the corresponding Relu6 operation.

                    -> Tensor v'2 t

                    features: The features passed as input to the corresponding Relu6 operation.

                    -> Tensor Build t

                    backprops: The gradients: + `gradients * (features > 0) * (features < 6)`.

                    reluGrad Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    gradients: The backpropagated gradients to the corresponding Relu operation.

                    -> Tensor v'2 t

                    features: The features passed as input to the corresponding Relu operation, OR + the outputs of that operation (both work equivalently).

                    -> Tensor Build t

                    backprops: `gradients * (features > 0)`.

                    Computes rectified linear gradients for a Relu operation.

                    reluGrad' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    gradients: The backpropagated gradients to the corresponding Relu operation.

                    -> Tensor v'2 t

                    features: The features passed as input to the corresponding Relu operation, OR + the outputs of that operation (both work equivalently).

                    -> Tensor Build t

                    backprops: `gradients * (features > 0)`.

                    remoteFusedGraphExecute Source #

                    Arguments

                    :: (TensorTypes tinputs, TensorTypes toutputs) 
                    => TensorList v'1 tinputs

                    inputs: Arbitrary number of tensors with arbitrary data types

                    -> TensorList Build toutputs

                    outputs: Arbitrary number of tensors with arbitrary data types

                    Execute a sub graph on a remote processor.

                    The graph specifications(such as graph itself, input tensors and output names) + are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo + as serialized_remote_fused_graph_execute_info. + The specifications will be passed to a dedicated registered + remote fused graph executor. The executor will send the graph specifications + to a remote processor and execute that graph. The execution results + will be passed to consumer nodes as outputs of this node.

                    remoteFusedGraphExecute' Source #

                    Arguments

                    :: (TensorTypes tinputs, TensorTypes toutputs) 
                    => OpParams 
                    -> TensorList v'1 tinputs

                    inputs: Arbitrary number of tensors with arbitrary data types

                    -> TensorList Build toutputs

                    outputs: Arbitrary number of tensors with arbitrary data types

                    repeatDataset Source #

                    Arguments

                    :: MonadBuild m' 
                    => [DataType]

                    output_types

                    -> Tensor v'1 ResourceHandle

                    input_dataset

                    -> Tensor v'2 Int64

                    count: A scalar representing the number of times that input_dataset should + be repeated. A value of `-1` indicates that it should be repeated infinitely.

                    -> m' (Tensor Value ResourceHandle)

                    handle

                    Creates a dataset that emits the outputs of input_dataset count times.

                    repeatDataset' Source #

                    Arguments

                    :: MonadBuild m' 
                    => OpParams 
                    -> [DataType]

                    output_types

                    -> Tensor v'1 ResourceHandle

                    input_dataset

                    -> Tensor v'2 Int64

                    count: A scalar representing the number of times that input_dataset should + be repeated. A value of `-1` indicates that it should be repeated infinitely.

                    -> m' (Tensor Value ResourceHandle)

                    handle

                    requantizationRange Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] tinput 
                    => Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> (Tensor Build Float, Tensor Build Float)

                    (output_min, output_max)

                    • output_min: The computed min output.
                    • output_max: the computed max output.

                    Given a quantized tensor described by (input, input_min, input_max), outputs a

                    range that covers the actual values present in that tensor. This op is typically used to produce the requested_output_min and requested_output_max for - Requantize.

                    requantizationRange'

                    Arguments

                    :: OneOf `[Int16, Int32, Word16, Word8]` tinput 
                    => OpParams 
                    -> Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> (Tensor Build Float, Tensor Build Float)

                    (output_min, output_max)

                    • output_min: The computed min output.
                    • output_max: the computed max output.

                    requantize

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> Tensor v'4 Float

                    requested_output_min: The float value that the minimum quantized output value represents.

                    -> Tensor v'5 Float

                    requested_output_max: The float value that the maximum quantized output value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: The requested_output_min value is copied into this output.
                    • output_max: The requested_output_max value is copied into this output.

                    Convert the quantized input tensor into a lower-precision output, using the

                    output range specified with requested_output_min and requested_output_max.

                    input_min, input_max
                    are scalar floats that specify the range for the float + Requantize.

                    requantizationRange' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Word16, Word8] tinput 
                    => OpParams 
                    -> Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> (Tensor Build Float, Tensor Build Float)

                    (output_min, output_max)

                    • output_min: The computed min output.
                    • output_max: the computed max output.

                    requantize Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> Tensor v'4 Float

                    requested_output_min: The float value that the minimum quantized output value represents.

                    -> Tensor v'5 Float

                    requested_output_max: The float value that the maximum quantized output value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: The requested_output_min value is copied into this output.
                    • output_max: The requested_output_max value is copied into this output.

                    Convert the quantized input tensor into a lower-precision output, using the

                    output range specified with requested_output_min and requested_output_max.

                    input_min, input_max
                    are scalar floats that specify the range for the float interpretation of the input data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 - value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.

                    requantize'

                    Arguments

                    :: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> Tensor v'4 Float

                    requested_output_min: The float value that the minimum quantized output value represents.

                    -> Tensor v'5 Float

                    requested_output_max: The float value that the maximum quantized output value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: The requested_output_min value is copied into this output.
                    • output_max: The requested_output_max value is copied into this output.

                    reshape

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tshape) 
                    => Tensor v'1 t

                    tensor

                    -> Tensor v'2 tshape

                    shape: Defines the shape of the output tensor.

                    -> Tensor Build t

                    output

                    Reshapes a tensor.

                    Given tensor, this operation returns a tensor that has the same values + value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.

                    requantize' Source #

                    Arguments

                    :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) 
                    => OpParams 
                    -> Tensor v'1 tinput

                    input

                    -> Tensor v'2 Float

                    input_min: The float value that the minimum quantized input value represents.

                    -> Tensor v'3 Float

                    input_max: The float value that the maximum quantized input value represents.

                    -> Tensor v'4 Float

                    requested_output_min: The float value that the minimum quantized output value represents.

                    -> Tensor v'5 Float

                    requested_output_max: The float value that the maximum quantized output value represents.

                    -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

                    (output, output_min, output_max)

                    • output
                    • output_min: The requested_output_min value is copied into this output.
                    • output_max: The requested_output_max value is copied into this output.

                    reshape Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tshape) 
                    => Tensor v'1 t

                    tensor

                    -> Tensor v'2 tshape

                    shape: Defines the shape of the output tensor.

                    -> Tensor Build t

                    output

                    Reshapes a tensor.

                    Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.

                    If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of `[-1]` flattens into 1-D. At most one component of shape can be -1.

                    If shape is 1-D or higher, then the operation returns a tensor with shape shape filled with the values of tensor. In this case, the number of elements - implied by shape must be the same as the number of elements in tensor.

                    For example:

                    ```prettyprint + implied by shape must be the same as the number of elements in tensor.

                    For example:

                    ``` # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], @@ -2163,40 +2519,40 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [6, 6, 6]]]

                    # tensor t is [7] # shape `[]` reshapes to a scalar reshape(t, []) ==> 7 - ```

                    reshape'

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tshape) 
                    => OpParams 
                    -> Tensor v'1 t

                    tensor

                    -> Tensor v'2 tshape

                    shape: Defines the shape of the output tensor.

                    -> Tensor Build t

                    output

                    resizeArea

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

                    Resize images to size using area interpolation.

                    Input images can be of different types but output images are always float.

                    resizeArea'

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

                    resizeBicubic

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

                    Resize images to size using bicubic interpolation.

                    Input images can be of different types but output images are always float.

                    resizeBicubic'

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

                    resizeBilinear

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

                    Resize images to size using bilinear interpolation.

                    Input images can be of different types but output images are always float.

                    resizeBilinear'

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

                    resizeBilinearGrad

                    Arguments

                    :: OneOf `[Word16, Double, Float]` t 
                    => Tensor v'1 Float

                    grads: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 t

                    original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, - The image tensor that was resized.

                    -> Tensor Build t

                    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. + ```

                    reshape' Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tshape) 
                    => OpParams 
                    -> Tensor v'1 t

                    tensor

                    -> Tensor v'2 tshape

                    shape: Defines the shape of the output tensor.

                    -> Tensor Build t

                    output

                    resizeArea Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape + `[batch, new_height, new_width, channels]`.

                    Resize images to size using area interpolation.

                    Input images can be of different types but output images are always float.

                    resizeArea' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape + `[batch, new_height, new_width, channels]`.

                    resizeBicubic Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape + `[batch, new_height, new_width, channels]`.

                    Resize images to size using bicubic interpolation.

                    Input images can be of different types but output images are always float.

                    resizeBicubic' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape + `[batch, new_height, new_width, channels]`.

                    resizeBilinear Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape + `[batch, new_height, new_width, channels]`.

                    Resize images to size using bilinear interpolation.

                    Input images can be of different types but output images are always float.

                    resizeBilinear' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

                    -> Tensor Build Float

                    resized_images: 4-D with shape + `[batch, new_height, new_width, channels]`.

                    resizeBilinearGrad Source #

                    Arguments

                    :: OneOf '[Word16, Double, Float] t 
                    => Tensor v'1 Float

                    grads: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 t

                    original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, + The image tensor that was resized.

                    -> Tensor Build t

                    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been - float or double.

                    Computes the gradient of bilinear interpolation.

                    resizeBilinearGrad'

                    Arguments

                    :: OneOf `[Word16, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 Float

                    grads: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 t

                    original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, - The image tensor that was resized.

                    -> Tensor Build t

                    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. + float or double.

                    Computes the gradient of bilinear interpolation.

                    resizeBilinearGrad' Source #

                    Arguments

                    :: OneOf '[Word16, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 Float

                    grads: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 t

                    original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, + The image tensor that was resized.

                    -> Tensor Build t

                    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been - float or double.

                    resizeNearestNeighbor

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

                    -> Tensor Build t

                    resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

                    Resize images to size using nearest neighbor interpolation.

                    resizeNearestNeighbor'

                    Arguments

                    :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

                    -> Tensor Build t

                    resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

                    resizeNearestNeighborGrad

                    Arguments

                    :: OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    grads: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The - original input size.

                    -> Tensor Build t

                    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients - with respect to the input image.

                    Computes the gradient of nearest neighbor interpolation.

                    resizeNearestNeighborGrad'

                    Arguments

                    :: OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    grads: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The - original input size.

                    -> Tensor Build t

                    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients - with respect to the input image.

                    resourceApplyAdadelta

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> ResourceHandle

                    accum_update: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay factor. Must be a scalar.

                    -> Tensor v'6 t

                    epsilon: Constant factor. Must be a scalar.

                    -> Tensor v'7 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the adadelta scheme.

                    accum = rho() * accum + (1 - rho()) * grad.square(); + float or double.

                    resizeNearestNeighbor Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

                    -> Tensor Build t

                    resized_images: 4-D with shape + `[batch, new_height, new_width, channels]`.

                    Resize images to size using nearest neighbor interpolation.

                    resizeNearestNeighbor' Source #

                    Arguments

                    :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    images: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

                    -> Tensor Build t

                    resized_images: 4-D with shape + `[batch, new_height, new_width, channels]`.

                    resizeNearestNeighborGrad Source #

                    Arguments

                    :: OneOf '[Int32, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    grads: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The + original input size.

                    -> Tensor Build t

                    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients + with respect to the input image.

                    Computes the gradient of nearest neighbor interpolation.

                    resizeNearestNeighborGrad' Source #

                    Arguments

                    :: OneOf '[Int32, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    grads: 4-D with shape `[batch, height, width, channels]`.

                    -> Tensor v'2 Int32

                    size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The + original input size.

                    -> Tensor Build t

                    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients + with respect to the input image.

                    resourceApplyAdadelta Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    accum_update: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay factor. Must be a scalar.

                    -> Tensor v'6 t

                    epsilon: Constant factor. Must be a scalar.

                    -> Tensor v'7 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the adadelta scheme.

                    accum = rho() * accum + (1 - rho()) * grad.square(); update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; update_accum = rho() * update_accum + (1 - rho()) * update.square(); - var -= update;

                    resourceApplyAdadelta'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> ResourceHandle

                    accum_update: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay factor. Must be a scalar.

                    -> Tensor v'6 t

                    epsilon: Constant factor. Must be a scalar.

                    -> Tensor v'7 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyAdagrad

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the adagrad scheme.

                    accum += grad * grad - var -= lr * grad * (1 / sqrt(accum))

                    resourceApplyAdagrad'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyAdagradDA

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    gradient_accumulator: Should be from a Variable().

                    -> ResourceHandle

                    gradient_squared_accumulator: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'8 Int64

                    global_step: Training step number. Must be a scalar.

                    -> m' ControlNode 

                    Update '*var' according to the proximal adagrad scheme.

                    resourceApplyAdagradDA'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    gradient_accumulator: Should be from a Variable().

                    -> ResourceHandle

                    gradient_squared_accumulator: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'8 Int64

                    global_step: Training step number. Must be a scalar.

                    -> m' ControlNode 

                    resourceApplyAdam

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    m: Should be from a Variable().

                    -> ResourceHandle

                    v: Should be from a Variable().

                    -> Tensor v'4 t

                    beta1_power: Must be a scalar.

                    -> Tensor v'5 t

                    beta2_power: Must be a scalar.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    beta1: Momentum factor. Must be a scalar.

                    -> Tensor v'8 t

                    beta2: Momentum factor. Must be a scalar.

                    -> Tensor v'9 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'10 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the Adam algorithm.

                    lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) + var -= update;

                    resourceApplyAdadelta' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    accum_update: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay factor. Must be a scalar.

                    -> Tensor v'6 t

                    epsilon: Constant factor. Must be a scalar.

                    -> Tensor v'7 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyAdagrad Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the adagrad scheme.

                    accum += grad * grad + var -= lr * grad * (1 / sqrt(accum))

                    resourceApplyAdagrad' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyAdagradDA Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    gradient_accumulator: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    gradient_squared_accumulator: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'8 Int64

                    global_step: Training step number. Must be a scalar.

                    -> m' ControlNode 

                    Update '*var' according to the proximal adagrad scheme.

                    resourceApplyAdagradDA' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    gradient_accumulator: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    gradient_squared_accumulator: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'8 Int64

                    global_step: Training step number. Must be a scalar.

                    -> m' ControlNode 

                    resourceApplyAdam Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    m: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    v: Should be from a Variable().

                    -> Tensor v'4 t

                    beta1_power: Must be a scalar.

                    -> Tensor v'5 t

                    beta2_power: Must be a scalar.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    beta1: Momentum factor. Must be a scalar.

                    -> Tensor v'8 t

                    beta2: Momentum factor. Must be a scalar.

                    -> Tensor v'9 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'10 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the Adam algorithm.

                    lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t - variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)

                    resourceApplyAdam'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    m: Should be from a Variable().

                    -> ResourceHandle

                    v: Should be from a Variable().

                    -> Tensor v'4 t

                    beta1_power: Must be a scalar.

                    -> Tensor v'5 t

                    beta2_power: Must be a scalar.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    beta1: Momentum factor. Must be a scalar.

                    -> Tensor v'8 t

                    beta2: Momentum factor. Must be a scalar.

                    -> Tensor v'9 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'10 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyCenteredRMSProp

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    mg: Should be from a Variable().

                    -> ResourceHandle

                    ms: Should be from a Variable().

                    -> ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'7 t

                    momentum

                    -> Tensor v'8 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'9 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the centered RMSProp algorithm.

                    The centered RMSProp algorithm uses an estimate of the centered second moment + variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)

                    resourceApplyAdam' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    m: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    v: Should be from a Variable().

                    -> Tensor v'4 t

                    beta1_power: Must be a scalar.

                    -> Tensor v'5 t

                    beta2_power: Must be a scalar.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    beta1: Momentum factor. Must be a scalar.

                    -> Tensor v'8 t

                    beta2: Momentum factor. Must be a scalar.

                    -> Tensor v'9 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'10 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyCenteredRMSProp Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    mg: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    ms: Should be from a Variable().

                    -> Tensor v'4 ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'7 t

                    momentum

                    -> Tensor v'8 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'9 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the centered RMSProp algorithm.

                    The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.

                    Note that in dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.

                    mean_square = decay * mean_square + (1-decay) * gradient ** 2 mean_grad = decay * mean_grad + (1-decay) * gradient

                    Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

                    mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) var <- var - mom

                    resourceApplyCenteredRMSProp'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    mg: Should be from a Variable().

                    -> ResourceHandle

                    ms: Should be from a Variable().

                    -> ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'7 t

                    momentum

                    -> Tensor v'8 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'9 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyFtrl

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    Update '*var' according to the Ftrl-proximal scheme.

                    accum_new = accum + grad * grad linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new

                    resourceApplyCenteredRMSProp' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    mg: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    ms: Should be from a Variable().

                    -> Tensor v'4 ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'7 t

                    momentum

                    -> Tensor v'8 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'9 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyFtrl Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    Update '*var' according to the Ftrl-proximal scheme.

                    accum_new = accum + grad * grad linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new

                    resourceApplyFtrl'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    resourceApplyGradientDescent

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    delta: The change.

                    -> m' ControlNode 

                    Update '*var' by subtracting alpha * delta from it.

                    resourceApplyGradientDescent'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    delta: The change.

                    -> m' ControlNode 

                    resourceApplyMomentum

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    momentum: Momentum. Must be a scalar.

                    -> m' ControlNode 

                    Update '*var' according to the momentum scheme. Set use_nesterov = True if you

                    want to use Nesterov momentum.

                    accum = accum * momentum + grad var -= lr * accum

                    resourceApplyMomentum'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    momentum: Momentum. Must be a scalar.

                    -> m' ControlNode 

                    resourceApplyProximalAdagrad

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'6 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.

                    accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

                    resourceApplyFtrl' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    resourceApplyFtrlV2 Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 shrinkage regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2_shrinkage

                    -> Tensor v'9 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    Update '*var' according to the Ftrl-proximal scheme.

                    grad_with_shrinkage = grad + 2 * l2_shrinkage * var accum_new = accum + grad_with_shrinkage * grad_with_shrinkage linear += grad_with_shrinkage + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new

                    resourceApplyFtrlV2' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'7 t

                    l2: L2 shrinkage regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2_shrinkage

                    -> Tensor v'9 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    resourceApplyGradientDescent Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    delta: The change.

                    -> m' ControlNode 

                    Update '*var' by subtracting alpha * delta from it.

                    resourceApplyGradientDescent' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    delta: The change.

                    -> m' ControlNode 

                    resourceApplyMomentum Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    momentum: Momentum. Must be a scalar.

                    -> m' ControlNode 

                    Update '*var' according to the momentum scheme. Set use_nesterov = True if you

                    want to use Nesterov momentum.

                    accum = accum * momentum + grad var -= lr * accum

                    resourceApplyMomentum' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 t

                    momentum: Momentum. Must be a scalar.

                    -> m' ControlNode 

                    resourceApplyProximalAdagrad Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'6 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.

                    accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

                    resourceApplyProximalAdagrad'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'6 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyProximalGradientDescent

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'4 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    delta: The change.

                    -> m' ControlNode 

                    Update '*var' as FOBOS algorithm with fixed learning rate.

                    prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

                    resourceApplyProximalGradientDescent'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'4 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    delta: The change.

                    -> m' ControlNode 

                    resourceApplyRMSProp

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    ms: Should be from a Variable().

                    -> ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'6 t

                    momentum

                    -> Tensor v'7 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'8 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the RMSProp algorithm.

                    Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

                    resourceApplyProximalAdagrad' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'4 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'6 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceApplyProximalGradientDescent Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'4 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    delta: The change.

                    -> m' ControlNode 

                    Update '*var' as FOBOS algorithm with fixed learning rate.

                    prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

                    resourceApplyProximalGradientDescent' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'4 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    delta: The change.

                    -> m' ControlNode 

                    resourceApplyRMSProp Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    ms: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'6 t

                    momentum

                    -> Tensor v'7 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'8 t

                    grad: The gradient.

                    -> m' ControlNode 

                    Update '*var' according to the RMSProp algorithm.

                    Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

                    mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

                    ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom

                    resourceApplyRMSProp'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    ms: Should be from a Variable().

                    -> ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'6 t

                    momentum

                    -> Tensor v'7 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'8 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceGather

                    Arguments

                    :: (MonadBuild m', TensorType dtype, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    resource

                    -> Tensor v'2 tindices

                    indices

                    -> m' (Tensor Value dtype)

                    output

                    Gather slices from the variable pointed to by resource according to indices.

                    indices must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

                    resourceApplyRMSProp' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    ms: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'6 t

                    momentum

                    -> Tensor v'7 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'8 t

                    grad: The gradient.

                    -> m' ControlNode 

                    resourceGather Source #

                    Arguments

                    :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    resource

                    -> Tensor v'2 tindices

                    indices

                    -> m' (Tensor Value dtype)

                    output

                    Gather slices from the variable pointed to by resource according to indices.

                    indices must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

                    ```python # Scalar indices output[:, ..., :] = params[indices, :, ... :]

                    # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :]

                    # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] ```

                    resourceGather'

                    Arguments

                    :: (MonadBuild m', TensorType dtype, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    resource

                    -> Tensor v'2 tindices

                    indices

                    -> m' (Tensor Value dtype)

                    output

                    resourceScatterAdd

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    resource: Should be from a Variable node.

                    -> Tensor v'2 tindices

                    indices: A tensor of indices into the first dimension of ref.

                    -> Tensor v'3 dtype

                    updates: A tensor of updated values to add to ref.

                    -> m' ControlNode 

                    Adds sparse updates to the variable referenced by resource.

                    This operation computes

                    # Scalar indices ref[indices, ...] += updates[...]

                    resourceGather' Source #

                    Arguments

                    :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    resource

                    -> Tensor v'2 tindices

                    indices

                    -> m' (Tensor Value dtype)

                    output

                    resourceScatterAdd Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    resource: Should be from a Variable node.

                    -> Tensor v'2 tindices

                    indices: A tensor of indices into the first dimension of ref.

                    -> Tensor v'3 dtype

                    updates: A tensor of updated values to add to ref.

                    -> m' ControlNode 

                    Adds sparse updates to the variable referenced by resource.

                    This operation computes

                    # Scalar indices ref[indices, ...] += updates[...]

                    # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...]

                    # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]

                    Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions add.

                    Requires `updates.shape = indices.shape + ref.shape[1:]`.

                    style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/ScatterAdd.png" alt - /div

                    resourceScatterAdd'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    resource: Should be from a Variable node.

                    -> Tensor v'2 tindices

                    indices: A tensor of indices into the first dimension of ref.

                    -> Tensor v'3 dtype

                    updates: A tensor of updated values to add to ref.

                    -> m' ControlNode 

                    resourceSparseApplyAdadelta

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> ResourceHandle

                    accum_update: : Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay factor. Must be a scalar.

                    -> Tensor v'6 t

                    epsilon: Constant factor. Must be a scalar.

                    -> Tensor v'7 t

                    grad: The gradient.

                    -> Tensor v'8 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    var: Should be from a Variable().

                    resourceSparseApplyAdadelta'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> ResourceHandle

                    accum_update: : Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay factor. Must be a scalar.

                    -> Tensor v'6 t

                    epsilon: Constant factor. Must be a scalar.

                    -> Tensor v'7 t

                    grad: The gradient.

                    -> Tensor v'8 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    resourceSparseApplyAdagrad

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

                    That is for rows we have grad for, we update var and accum as follows: + style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt + /div

                    resourceScatterAdd' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    resource: Should be from a Variable node.

                    -> Tensor v'2 tindices

                    indices: A tensor of indices into the first dimension of ref.

                    -> Tensor v'3 dtype

                    updates: A tensor of updated values to add to ref.

                    -> m' ControlNode 

                    resourceSparseApplyAdadelta Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    accum_update: : Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay factor. Must be a scalar.

                    -> Tensor v'6 t

                    epsilon: Constant factor. Must be a scalar.

                    -> Tensor v'7 t

                    grad: The gradient.

                    -> Tensor v'8 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    var: Should be from a Variable().

                    resourceSparseApplyAdadelta' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    accum_update: : Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay factor. Must be a scalar.

                    -> Tensor v'6 t

                    epsilon: Constant factor. Must be a scalar.

                    -> Tensor v'7 t

                    grad: The gradient.

                    -> Tensor v'8 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    resourceSparseApplyAdagrad Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

                    That is for rows we have grad for, we update var and accum as follows: accum += grad * grad - var -= lr * grad * (1 / sqrt(accum))

                    resourceSparseApplyAdagrad'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    resourceSparseApplyAdagradDA

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    gradient_accumulator: Should be from a Variable().

                    -> ResourceHandle

                    gradient_squared_accumulator: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'9 Int64

                    global_step: Training step number. Must be a scalar.

                    -> m' ControlNode 

                    Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

                    resourceSparseApplyAdagradDA'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    gradient_accumulator: Should be from a Variable().

                    -> ResourceHandle

                    gradient_squared_accumulator: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'9 Int64

                    global_step: Training step number. Must be a scalar.

                    -> m' ControlNode 

                    resourceSparseApplyCenteredRMSProp

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    mg: Should be from a Variable().

                    -> ResourceHandle

                    ms: Should be from a Variable().

                    -> ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'7 t

                    momentum

                    -> Tensor v'8 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'9 t

                    grad: The gradient.

                    -> Tensor v'10 tindices

                    indices: A vector of indices into the first dimension of var, ms and mom.

                    -> m' ControlNode 

                    Update '*var' according to the centered RMSProp algorithm.

                    The centered RMSProp algorithm uses an estimate of the centered second moment + var -= lr * grad * (1 / sqrt(accum))

                    resourceSparseApplyAdagrad' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    resourceSparseApplyAdagradDA Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    gradient_accumulator: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    gradient_squared_accumulator: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'9 Int64

                    global_step: Training step number. Must be a scalar.

                    -> m' ControlNode 

                    Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

                    resourceSparseApplyAdagradDA' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    gradient_accumulator: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    gradient_squared_accumulator: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'9 Int64

                    global_step: Training step number. Must be a scalar.

                    -> m' ControlNode 

                    resourceSparseApplyCenteredRMSProp Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    mg: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    ms: Should be from a Variable().

                    -> Tensor v'4 ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'7 t

                    momentum

                    -> Tensor v'8 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'9 t

                    grad: The gradient.

                    -> Tensor v'10 tindices

                    indices: A vector of indices into the first dimension of var, ms and mom.

                    -> m' ControlNode 

                    Update '*var' according to the centered RMSProp algorithm.

                    The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.

                    Note that in dense implementation of this algorithm, mg, ms, and mom will @@ -2241,28 +2603,38 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core mean_grad = decay * mean_grad + (1-decay) * gradient Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

                    ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - var <- var - mom

                    resourceSparseApplyCenteredRMSProp'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    mg: Should be from a Variable().

                    -> ResourceHandle

                    ms: Should be from a Variable().

                    -> ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'7 t

                    momentum

                    -> Tensor v'8 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'9 t

                    grad: The gradient.

                    -> Tensor v'10 tindices

                    indices: A vector of indices into the first dimension of var, ms and mom.

                    -> m' ControlNode 

                    resourceSparseApplyFtrl

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'9 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    Update relevant entries in '*var' according to the Ftrl-proximal scheme.

                    That is for rows we have grad for, we update var, accum and linear as follows: + var <- var - mom

                    resourceSparseApplyCenteredRMSProp' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    mg: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    ms: Should be from a Variable().

                    -> Tensor v'4 ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'5 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'6 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'7 t

                    momentum

                    -> Tensor v'8 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'9 t

                    grad: The gradient.

                    -> Tensor v'10 tindices

                    indices: A vector of indices into the first dimension of var, ms and mom.

                    -> m' ControlNode 

                    resourceSparseApplyFtrl Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'9 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    Update relevant entries in '*var' according to the Ftrl-proximal scheme.

                    That is for rows we have grad for, we update var, accum and linear as follows: accum_new = accum + grad * grad linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 - accum = accum_new

                    resourceSparseApplyFtrl'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'9 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    resourceSparseApplyMomentum

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    momentum: Momentum. Must be a scalar.

                    -> m' ControlNode 

                    Update relevant entries in '*var' and '*accum' according to the momentum scheme.

                    Set use_nesterov = True if you want to use Nesterov momentum.

                    That is for rows we have grad for, we update var and accum as follows:

                    accum = accum * momentum + grad - var -= lr * accum

                    resourceSparseApplyMomentum'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    momentum: Momentum. Must be a scalar.

                    -> m' ControlNode 

                    resourceSparseApplyProximalAdagrad

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'6 t

                    grad: The gradient.

                    -> Tensor v'7 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

                    That is for rows we have grad for, we update var and accum as follows: + accum = accum_new

                    resourceSparseApplyFtrl' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'9 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    resourceSparseApplyFtrlV2 Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 shrinkage regularization. Must be a scalar.

                    -> Tensor v'9 t

                    l2_shrinkage

                    -> Tensor v'10 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    Update relevant entries in '*var' according to the Ftrl-proximal scheme.

                    That is for rows we have grad for, we update var, accum and linear as follows: + grad_with_shrinkage = grad + 2 * l2_shrinkage * var + accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + linear += grad_with_shrinkage + + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + accum = accum_new

                    resourceSparseApplyFtrlV2' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    linear: Should be from a Variable().

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'7 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'8 t

                    l2: L2 shrinkage regularization. Must be a scalar.

                    -> Tensor v'9 t

                    l2_shrinkage

                    -> Tensor v'10 t

                    lr_power: Scaling factor. Must be a scalar.

                    -> m' ControlNode 

                    resourceSparseApplyMomentum Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    momentum: Momentum. Must be a scalar.

                    -> m' ControlNode 

                    Update relevant entries in '*var' and '*accum' according to the momentum scheme.

                    Set use_nesterov = True if you want to use Nesterov momentum.

                    That is for rows we have grad for, we update var and accum as follows:

                    accum = accum * momentum + grad + var -= lr * accum

                    resourceSparseApplyMomentum' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    grad: The gradient.

                    -> Tensor v'5 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> Tensor v'6 t

                    momentum: Momentum. Must be a scalar.

                    -> m' ControlNode 

                    resourceSparseApplyProximalAdagrad Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'6 t

                    grad: The gradient.

                    -> Tensor v'7 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

                    That is for rows we have grad for, we update var and accum as follows: accum += grad * grad prox_v = var prox_v -= lr * grad * (1 / sqrt(accum)) - var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

                    resourceSparseApplyProximalAdagrad'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'6 t

                    grad: The gradient.

                    -> Tensor v'7 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    resourceSparseApplyProximalGradientDescent

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'4 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    grad: The gradient.

                    -> Tensor v'6 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    Sparse update '*var' as FOBOS algorithm with fixed learning rate.

                    That is for rows we have grad for, we update var as follows: + var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

                    resourceSparseApplyProximalAdagrad' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    accum: Should be from a Variable().

                    -> Tensor v'3 t

                    lr: Learning rate. Must be a scalar.

                    -> Tensor v'4 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'6 t

                    grad: The gradient.

                    -> Tensor v'7 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    resourceSparseApplyProximalGradientDescent Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'4 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    grad: The gradient.

                    -> Tensor v'6 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    Sparse update '*var' as FOBOS algorithm with fixed learning rate.

                    That is for rows we have grad for, we update var as follows: prox_v = var - alpha * grad - var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

                    resourceSparseApplyProximalGradientDescent'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'4 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    grad: The gradient.

                    -> Tensor v'6 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    resourceSparseApplyRMSProp

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    ms: Should be from a Variable().

                    -> ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'6 t

                    momentum

                    -> Tensor v'7 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'8 t

                    grad: The gradient.

                    -> Tensor v'9 tindices

                    indices: A vector of indices into the first dimension of var, ms and mom.

                    -> m' ControlNode 

                    Update '*var' according to the RMSProp algorithm.

                    Note that in dense implementation of this algorithm, ms and mom will + var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

                    resourceSparseApplyProximalGradientDescent' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 t

                    alpha: Scaling factor. Must be a scalar.

                    -> Tensor v'3 t

                    l1: L1 regularization. Must be a scalar.

                    -> Tensor v'4 t

                    l2: L2 regularization. Must be a scalar.

                    -> Tensor v'5 t

                    grad: The gradient.

                    -> Tensor v'6 tindices

                    indices: A vector of indices into the first dimension of var and accum.

                    -> m' ControlNode 

                    resourceSparseApplyRMSProp Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    ms: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'6 t

                    momentum

                    -> Tensor v'7 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'8 t

                    grad: The gradient.

                    -> Tensor v'9 tindices

                    indices: A vector of indices into the first dimension of var, ms and mom.

                    -> m' ControlNode 

                    Update '*var' according to the RMSProp algorithm.

                    Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

                    mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

                    ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - var <- var - mom

                    resourceSparseApplyRMSProp'

                    Arguments

                    :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                    => OpParams 
                    -> ResourceHandle

                    var: Should be from a Variable().

                    -> ResourceHandle

                    ms: Should be from a Variable().

                    -> ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'6 t

                    momentum

                    -> Tensor v'7 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'8 t

                    grad: The gradient.

                    -> Tensor v'9 tindices

                    indices: A vector of indices into the first dimension of var, ms and mom.

                    -> m' ControlNode 

                    restore

                    Arguments

                    :: TensorType dt 
                    => Tensor v'1 ByteString

                    file_pattern: Must have a single element. The pattern of the files from - which we read the tensor.

                    -> Tensor v'2 ByteString

                    tensor_name: Must have a single element. The name of the tensor to be - restored.

                    -> Tensor Build dt

                    tensor: The restored tensor.

                    Restores a tensor from checkpoint files.

                    Reads a tensor stored in one or several files. If there are several files (for + var <- var - mom

                    resourceSparseApplyRMSProp' Source #

                    Arguments

                    :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    var: Should be from a Variable().

                    -> Tensor v'2 ResourceHandle

                    ms: Should be from a Variable().

                    -> Tensor v'3 ResourceHandle

                    mom: Should be from a Variable().

                    -> Tensor v'4 t

                    lr: Scaling factor. Must be a scalar.

                    -> Tensor v'5 t

                    rho: Decay rate. Must be a scalar.

                    -> Tensor v'6 t

                    momentum

                    -> Tensor v'7 t

                    epsilon: Ridge term. Must be a scalar.

                    -> Tensor v'8 t

                    grad: The gradient.

                    -> Tensor v'9 tindices

                    indices: A vector of indices into the first dimension of var, ms and mom.

                    -> m' ControlNode 

                    resourceStridedSliceAssign Source #

                    Arguments

                    :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) 
                    => Tensor v'1 ResourceHandle

                    ref

                    -> Tensor v'2 index

                    begin

                    -> Tensor v'3 index

                    end

                    -> Tensor v'4 index

                    strides

                    -> Tensor v'5 t

                    value

                    -> m' ControlNode 

                    Assign value to the sliced l-value reference of ref.

                    The values of value are assigned to the positions in the variable + ref that are selected by the slice parameters. The slice parameters + `begin, end, strides, etc. work exactly as in StridedSlice.

                    NOTE this op currently does not support broadcasting and so value's + shape must be exactly the shape produced by the slice of ref.

                    resourceStridedSliceAssign' Source #

                    Arguments

                    :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) 
                    => OpParams 
                    -> Tensor v'1 ResourceHandle

                    ref

                    -> Tensor v'2 index

                    begin

                    -> Tensor v'3 index

                    end

                    -> Tensor v'4 index

                    strides

                    -> Tensor v'5 t

                    value

                    -> m' ControlNode 

                    restore Source #

                    Arguments

                    :: (MonadBuild m', TensorType dt) 
                    => Tensor v'1 ByteString

                    file_pattern: Must have a single element. The pattern of the files from + which we read the tensor.

                    -> Tensor v'2 ByteString

                    tensor_name: Must have a single element. The name of the tensor to be + restored.

                    -> m' (Tensor Value dt)

                    tensor: The restored tensor.

                    Restores a tensor from checkpoint files.

                    Reads a tensor stored in one or several files. If there are several files (for instance because a tensor was saved as slices), file_pattern may contain - wildcard symbols (* and ?) in the filename portion only, not in the + wildcard symbols (* and ?) in the filename portion only, not in the directory portion.

                    If a file_pattern matches several files, preferred_shard can be used to hint in which file the requested tensor is likely to be found. This op will first open the file at index preferred_shard in the list of matching files and try @@ -2271,19 +2643,19 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core preferred_shard to match the value passed as the shard input of a matching Save Op may speed up Restore. This attribute only affects performance, not correctness. The default value -1 means files are processed in - order.

                    See also RestoreSlice.

                    restore'

                    Arguments

                    :: TensorType dt 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    file_pattern: Must have a single element. The pattern of the files from - which we read the tensor.

                    -> Tensor v'2 ByteString

                    tensor_name: Must have a single element. The name of the tensor to be - restored.

                    -> Tensor Build dt

                    tensor: The restored tensor.

                    restoreSlice

                    Arguments

                    :: TensorType dt 
                    => Tensor v'1 ByteString

                    file_pattern: Must have a single element. The pattern of the files from - which we read the tensor.

                    -> Tensor v'2 ByteString

                    tensor_name: Must have a single element. The name of the tensor to be - restored.

                    -> Tensor v'3 ByteString

                    shape_and_slice: Scalar. The shapes and slice specifications to use when - restoring a tensor.

                    -> Tensor Build dt

                    tensor: The restored tensor.

                    Restores a tensor from checkpoint files.

                    This is like Restore except that restored tensor can be listed as filling + order.

                    See also RestoreSlice.

                    restore' Source #

                    Arguments

                    :: (MonadBuild m', TensorType dt) 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    file_pattern: Must have a single element. The pattern of the files from + which we read the tensor.

                    -> Tensor v'2 ByteString

                    tensor_name: Must have a single element. The name of the tensor to be + restored.

                    -> m' (Tensor Value dt)

                    tensor: The restored tensor.

                    restoreSlice Source #

                    Arguments

                    :: (MonadBuild m', TensorType dt) 
                    => Tensor v'1 ByteString

                    file_pattern: Must have a single element. The pattern of the files from + which we read the tensor.

                    -> Tensor v'2 ByteString

                    tensor_name: Must have a single element. The name of the tensor to be + restored.

                    -> Tensor v'3 ByteString

                    shape_and_slice: Scalar. The shapes and slice specifications to use when + restoring a tensor.

                    -> m' (Tensor Value dt)

                    tensor: The restored tensor.

                    Restores a tensor from checkpoint files.

                    This is like Restore except that restored tensor can be listed as filling only a slice of a larger tensor. shape_and_slice specifies the shape of the larger tensor and the slice that the restored tensor covers.

                    The shape_and_slice input has the same format as the - elements of the shapes_and_slices input of the SaveSlices op.

                    restoreSlice'

                    Arguments

                    :: TensorType dt 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    file_pattern: Must have a single element. The pattern of the files from - which we read the tensor.

                    -> Tensor v'2 ByteString

                    tensor_name: Must have a single element. The name of the tensor to be - restored.

                    -> Tensor v'3 ByteString

                    shape_and_slice: Scalar. The shapes and slice specifications to use when - restoring a tensor.

                    -> Tensor Build dt

                    tensor: The restored tensor.

                    restoreV2

                    Arguments

                    :: TensorTypes dtypes 
                    => Tensor v'1 ByteString

                    prefix: Must have a single element. The prefix of a V2 checkpoint.

                    -> Tensor v'2 ByteString

                    tensor_names: shape {N}. The names of the tensors to be restored.

                    -> Tensor v'3 ByteString

                    shape_and_slices: shape {N}. The slice specs of the tensors to be restored. - Empty strings indicate that they are non-partitioned tensors.

                    -> TensorList Build dtypes

                    tensors: shape {N}. The restored tensors, whose shapes are read from the + elements of the shapes_and_slices input of the SaveSlices op.

                    restoreSlice' Source #

                    Arguments

                    :: (MonadBuild m', TensorType dt) 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    file_pattern: Must have a single element. The pattern of the files from + which we read the tensor.

                    -> Tensor v'2 ByteString

                    tensor_name: Must have a single element. The name of the tensor to be + restored.

                    -> Tensor v'3 ByteString

                    shape_and_slice: Scalar. The shapes and slice specifications to use when + restoring a tensor.

                    -> m' (Tensor Value dt)

                    tensor: The restored tensor.

                    restoreV2 Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => Tensor v'1 ByteString

                    prefix: Must have a single element. The prefix of a V2 checkpoint.

                    -> Tensor v'2 ByteString

                    tensor_names: shape {N}. The names of the tensors to be restored.

                    -> Tensor v'3 ByteString

                    shape_and_slices: shape {N}. The slice specs of the tensors to be restored. + Empty strings indicate that they are non-partitioned tensors.

                    -> m' (TensorList Value dtypes)

                    tensors: shape {N}. The restored tensors, whose shapes are read from the checkpoint directly.

                    Restores tensors from a V2 checkpoint.

                    For backward compatibility with the V1 format, this Op currently allows restoring from a V1 checkpoint as well: - This Op first attempts to find the V2 index file pointed to by "prefix", and @@ -2292,12 +2664,12 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core Relying on this behavior is not recommended, as the ability to fall back to read V1 might be deprecated and eventually removed.

                    By default, restores the named tensors in full. If the caller wishes to restore specific slices of stored tensors, "shape_and_slices" should be non-empty - strings and correspondingly well-formed.

                    Callers must ensure all the named tensors are indeed stored in the checkpoint.

                    restoreV2'

                    Arguments

                    :: TensorTypes dtypes 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    prefix: Must have a single element. The prefix of a V2 checkpoint.

                    -> Tensor v'2 ByteString

                    tensor_names: shape {N}. The names of the tensors to be restored.

                    -> Tensor v'3 ByteString

                    shape_and_slices: shape {N}. The slice specs of the tensors to be restored. - Empty strings indicate that they are non-partitioned tensors.

                    -> TensorList Build dtypes

                    tensors: shape {N}. The restored tensors, whose shapes are read from the - checkpoint directly.

                    reverse

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => Tensor v'1 t

                    tensor: Up to 8-D.

                    -> Tensor v'2 Bool

                    dims: 1-D. The dimensions to reverse.

                    -> Tensor Build t

                    output: The same shape as tensor.

                    Reverses specific dimensions of a tensor.

                    Given a tensor, and a bool tensor dims representing the dimensions + strings and correspondingly well-formed.

                    Callers must ensure all the named tensors are indeed stored in the checkpoint.

                    restoreV2' Source #

                    Arguments

                    :: (MonadBuild m', TensorTypes dtypes) 
                    => OpParams 
                    -> Tensor v'1 ByteString

                    prefix: Must have a single element. The prefix of a V2 checkpoint.

                    -> Tensor v'2 ByteString

                    tensor_names: shape {N}. The names of the tensors to be restored.

                    -> Tensor v'3 ByteString

                    shape_and_slices: shape {N}. The slice specs of the tensors to be restored. + Empty strings indicate that they are non-partitioned tensors.

                    -> m' (TensorList Value dtypes)

                    tensors: shape {N}. The restored tensors, whose shapes are read from the + checkpoint directly.

                    reverse Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Bool, ByteString, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => Tensor v'1 t

                    tensor: Up to 8-D.

                    -> Tensor v'2 Bool

                    dims: 1-D. The dimensions to reverse.

                    -> Tensor Build t

                    output: The same shape as tensor.

                    Reverses specific dimensions of a tensor.

                    Given a tensor, and a bool tensor dims representing the dimensions of tensor, this operation reverses each dimension i of tensor where - `dims[i]` is True.

                    tensor can have up to 8 dimensions. The number of dimensions - of tensor must equal the number of elements in dims. In other words:

                    `rank(tensor) = size(dims)`

                    For example:

                    ```prettyprint + `dims[i]` is True.

                    tensor can have up to 8 dimensions. The number of dimensions + of tensor must equal the number of elements in dims. In other words:

                    `rank(tensor) = size(dims)`

                    For example:

                    ``` # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, 7], # [ 8, 9, 10, 11]], @@ -2323,13 +2695,13 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [[20, 21, 22, 23], [16, 17, 18, 19], [12, 13, 14, 15]]]] - ```

                    reverse'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    tensor: Up to 8-D.

                    -> Tensor v'2 Bool

                    dims: 1-D. The dimensions to reverse.

                    -> Tensor Build t

                    output: The same shape as tensor.

                    reverseSequence

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tlen) 
                    => Int64

                    seq_dim: The dimension which is partially reversed.

                    -> Tensor v'1 t

                    input: The input to reverse.

                    -> Tensor v'2 tlen

                    seq_lengths: 1-D with length `input.dims(batch_dim)` and - `max(seq_lengths) < input.dims(seq_dim)`

                    -> Tensor Build t

                    output: The partially reversed input. It has the same shape as input.

                    Reverses variable length slices.

                    This op first slices input along the dimension batch_dim, and for each + ```

                    reverse' Source #

                    Arguments

                    :: OneOf '[Complex Double, Complex Float, Bool, ByteString, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                    => OpParams 
                    -> Tensor v'1 t

                    tensor: Up to 8-D.

                    -> Tensor v'2 Bool

                    dims: 1-D. The dimensions to reverse.

                    -> Tensor Build t

                    output: The same shape as tensor.

                    reverseSequence Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tlen) 
                    => Int64

                    seq_dim: The dimension which is partially reversed.

                    -> Tensor v'1 t

                    input: The input to reverse.

                    -> Tensor v'2 tlen

                    seq_lengths: 1-D with length `input.dims(batch_dim)` and + `max(seq_lengths) <= input.dims(seq_dim)`

                    -> Tensor Build t

                    output: The partially reversed input. It has the same shape as input.

                    Reverses variable length slices.

                    This op first slices input along the dimension batch_dim, and for each slice i, reverses the first `seq_lengths[i]` elements along - the dimension seq_dim.

                    The elements of seq_lengths must obey `seq_lengths[i] < input.dims[seq_dim]`, + the dimension seq_dim.

                    The elements of seq_lengths must obey `seq_lengths[i] <= input.dims[seq_dim]`, and seq_lengths must be a vector of length `input.dims[batch_dim]`.

                    The output slice i along dimension batch_dim is then given by input slice i, with the first `seq_lengths[i]` slices along dimension - seq_dim reversed.

                    For example:

                    ```prettyprint + seq_dim reversed.

                    For example:

                    ``` # Given this: batch_dim = 0 seq_dim = 1 @@ -2343,7 +2715,7 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core output[1, 2:, :, ...] = input[1, 2:, :, ...] output[2, 3:, :, ...] = input[2, 3:, :, ...] output[3, 2:, :, ...] = input[3, 2:, :, ...] - ```

                    In contrast, if:

                    ```prettyprint + ```

                    In contrast, if:

                    ``` # Given this: batch_dim = 2 seq_dim = 0 @@ -2357,13 +2729,13 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] - ```

                    reverseSequence'

                    Arguments

                    :: (TensorType t, OneOf `[Int32, Int64]` tlen) 
                    => OpParams 
                    -> Int64

                    seq_dim: The dimension which is partially reversed.

                    -> Tensor v'1 t

                    input: The input to reverse.

                    -> Tensor v'2 tlen

                    seq_lengths: 1-D with length `input.dims(batch_dim)` and - `max(seq_lengths) < input.dims(seq_dim)`

                    -> Tensor Build t

                    output: The partially reversed input. It has the same shape as input.

                    reverseV2

                    Arguments

                    :: (OneOf `[Int32, Int64]` tidx, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => Tensor v'1 t

                    tensor: Up to 8-D.

                    -> Tensor v'2 tidx

                    axis: 1-D. The indices of the dimensions to reverse.

                    -> Tensor Build t

                    output: The same shape as tensor.

                    Reverses specific dimensions of a tensor.

                    NOTE `tf.reverse` has now changed behavior in preparation for 1.0. + ```

                    reverseSequence' Source #

                    Arguments

                    :: (TensorType t, OneOf '[Int32, Int64] tlen) 
                    => OpParams 
                    -> Int64

                    seq_dim: The dimension which is partially reversed.

                    -> Tensor v'1 t

                    input: The input to reverse.

                    -> Tensor v'2 tlen

                    seq_lengths: 1-D with length `input.dims(batch_dim)` and + `max(seq_lengths) <= input.dims(seq_dim)`

                    -> Tensor Build t

                    output: The partially reversed input. It has the same shape as input.

                    reverseV2 Source #

                    Arguments

                    :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, ByteString, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => Tensor v'1 t

                    tensor: Up to 8-D.

                    -> Tensor v'2 tidx

                    axis: 1-D. The indices of the dimensions to reverse.

                    -> Tensor Build t

                    output: The same shape as tensor.

                    Reverses specific dimensions of a tensor.

                    NOTE `tf.reverse` has now changed behavior in preparation for 1.0. `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.

                    Given a tensor, and a int32 tensor axis representing the set of dimensions of tensor to reverse. This operation reverses each dimension i for which there exists j s.t. `axis[j] == i`.

                    tensor can have up to 8 dimensions. The number of dimensions specified in axis may be 0 or more entries. If an index is specified more than - once, a InvalidArgument error is raised.

                    For example:

                    ```prettyprint + once, a InvalidArgument error is raised.

                    For example:

                    ``` # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, 7], # [ 8, 9, 10, 11]], @@ -2389,16 +2761,16 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [[20, 21, 22, 23], [16, 17, 18, 19], [12, 13, 14, 15]]]] - ```

                    reverseV2'

                    Arguments

                    :: (OneOf `[Int32, Int64]` tidx, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
                    => OpParams 
                    -> Tensor v'1 t

                    tensor: Up to 8-D.

                    -> Tensor v'2 tidx

                    axis: 1-D. The indices of the dimensions to reverse.

                    -> Tensor Build t

                    output: The same shape as tensor.

                    rint

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    Returns element-wise integer closest to x.

                    If the result is midway between two representable values, + ```

                    reverseV2' Source #

                    Arguments

                    :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, ByteString, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                    => OpParams 
                    -> Tensor v'1 t

                    tensor: Up to 8-D.

                    -> Tensor v'2 tidx

                    axis: 1-D. The indices of the dimensions to reverse.

                    -> Tensor Build t

                    output: The same shape as tensor.

                    rint Source #

                    Arguments

                    :: OneOf '[Double, Float] t 
                    => Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    Returns element-wise integer closest to x.

                    If the result is midway between two representable values, the even representable is chosen. For example:

                    ``` rint(-1.5) ==> -2.0 rint(0.5000001) ==> 1.0 rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] - ```

                    rint'

                    Arguments

                    :: OneOf `[Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    round

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    Rounds the values of a tensor to the nearest integer, element-wise.

                    Rounds half to even. Also known as bankers rounding. If you want to round - according to the current system rounding mode use std::cint.

                    rsqrt

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    Computes reciprocal of square root of x element-wise.

                    I.e., \(y = 1 / sqrt{x}\).

                    rsqrt'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor Build t

                    y

                    rsqrtGrad

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                    => Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    Computes the gradient for the rsqrt of x wrt its input.

                    Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and dy - is the corresponding input gradient.

                    rsqrtGrad'

                    Arguments

                    :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                    => OpParams 
                    -> Tensor v'1 t

                    x

                    -> Tensor v'2 t

                    y

                    -> Tensor Build t

                    z

                    sampleDistortedBoundingBox

                    Arguments

                    :: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8]` t) 
                    => Tensor v'1 t

                    image_size: 1-D, containing `[height, width, channels]`.

                    -> Tensor v'2 Float

                    bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes - associated with the image.

                    -> m' (Tensor Value t, Tensor Value t, Tensor Value Float)

                    (begin, size, bboxes)

                    • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to + ```

                      rint' Source #

                      Arguments

                      :: OneOf '[Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      round Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Rounds the values of a tensor to the nearest integer, element-wise.

                      Rounds half to even. Also known as bankers rounding. If you want to round + according to the current system rounding mode use std::cint.

                      rsqrt Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Computes reciprocal of square root of x element-wise.

                      I.e., \(y = 1 / sqrt{x}\).

                      rsqrtGrad Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                      => Tensor v'1 t

                      x

                      -> Tensor v'2 t

                      y

                      -> Tensor Build t

                      z

                      Computes the gradient for the rsqrt of x wrt its input.

                      Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and dy + is the corresponding input gradient.

                      rsqrtGrad' Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      x

                      -> Tensor v'2 t

                      y

                      -> Tensor Build t

                      z

                      sampleDistortedBoundingBox Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) 
                      => Tensor v'1 t

                      image_size: 1-D, containing `[height, width, channels]`.

                      -> Tensor v'2 Float

                      bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes + associated with the image.

                      -> m' (Tensor Value t, Tensor Value t, Tensor Value Float)

                      (begin, size, bboxes)

                      • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
                      • size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
                      • bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`.

                      Generate a single randomly distorted bounding box for an image.

                      Bounding box annotations are often supplied in addition to ground-truth labels @@ -2424,81 +2796,122 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core ```

                      Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If use_image_if_no_bounding_boxes is - false and no bounding boxes are supplied, an error is raised.

                      sampleDistortedBoundingBox'

                      Arguments

                      :: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8]` t) 
                      => OpParams 
                      -> Tensor v'1 t

                      image_size: 1-D, containing `[height, width, channels]`.

                      -> Tensor v'2 Float

                      bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes - associated with the image.

                      -> m' (Tensor Value t, Tensor Value t, Tensor Value Float)

                      (begin, size, bboxes)

                      • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to + false and no bounding boxes are supplied, an error is raised.

                        sampleDistortedBoundingBox' Source #

                        Arguments

                        :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) 
                        => OpParams 
                        -> Tensor v'1 t

                        image_size: 1-D, containing `[height, width, channels]`.

                        -> Tensor v'2 Float

                        bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes + associated with the image.

                        -> m' (Tensor Value t, Tensor Value t, Tensor Value Float)

                        (begin, size, bboxes)

                        • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
                        • size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
                        • bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. - Provide as input to `tf.image.draw_bounding_boxes`.

                        save

                        Arguments

                        :: (MonadBuild m', TensorTypes t) 
                        => Tensor v'1 ByteString

                        filename: Must have a single element. The name of the file to which we write - the tensor.

                        -> Tensor v'2 ByteString

                        tensor_names: Shape `[N]`. The names of the tensors to be saved.

                        -> TensorList v'3 t

                        data: N tensors to save.

                        -> m' ControlNode 

                        Saves the input tensors to disk.

                        The size of tensor_names must match the number of tensors in `data`. `data[i]` - is written to filename with name `tensor_names[i]`.

                        See also SaveSlices.

                        save'

                        Arguments

                        :: (MonadBuild m', TensorTypes t) 
                        => OpParams 
                        -> Tensor v'1 ByteString

                        filename: Must have a single element. The name of the file to which we write - the tensor.

                        -> Tensor v'2 ByteString

                        tensor_names: Shape `[N]`. The names of the tensors to be saved.

                        -> TensorList v'3 t

                        data: N tensors to save.

                        -> m' ControlNode 

                        saveSlices

                        Arguments

                        :: (MonadBuild m', TensorTypes t) 
                        => Tensor v'1 ByteString

                        filename: Must have a single element. The name of the file to which we write the - tensor.

                        -> Tensor v'2 ByteString

                        tensor_names: Shape `[N]`. The names of the tensors to be saved.

                        -> Tensor v'3 ByteString

                        shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when - saving the tensors.

                        -> TensorList v'4 t

                        data: N tensors to save.

                        -> m' ControlNode 

                        Saves input tensors slices to disk.

                        This is like Save except that tensors can be listed in the saved file as being + Provide as input to `tf.image.draw_bounding_boxes`.

                      sampleDistortedBoundingBoxV2 Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) 
                      => Tensor v'1 t

                      image_size: 1-D, containing `[height, width, channels]`.

                      -> Tensor v'2 Float

                      bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes + associated with the image.

                      -> Tensor v'3 Float

                      min_object_covered: The cropped area of the image must contain at least this + fraction of any bounding box supplied. The value of this parameter should be + non-negative. In the case of 0, the cropped area does not need to overlap + any of the bounding boxes supplied.

                      -> m' (Tensor Value t, Tensor Value t, Tensor Value Float)

                      (begin, size, bboxes)

                      • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to + `tf.slice`.
                      • size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to + `tf.slice`.
                      • bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. + Provide as input to `tf.image.draw_bounding_boxes`.

                      Generate a single randomly distorted bounding box for an image.

                      Bounding box annotations are often supplied in addition to ground-truth labels + in image recognition or object localization tasks. A common technique for + training such a system is to randomly distort an image while preserving + its content, i.e. *data augmentation*. This Op outputs a randomly distorted + localization of an object, i.e. bounding box, given an image_size, + bounding_boxes and a series of constraints.

                      The output of this Op is a single bounding box that may be used to crop the + original image. The output is returned as 3 tensors: begin, size and + bboxes. The first 2 tensors can be fed directly into `tf.slice` to crop the + image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + what the bounding box looks like.

                      Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + height of the underlying image.

                      For example,

                      ```python + # Generate a single distorted bounding box. + begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bounding_boxes)

                      # Draw the bounding box in an image summary. + image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox_for_draw) + tf.image_summary(images_with_box, image_with_box)

                      # Employ the bounding box to distort the image. + distorted_image = tf.slice(image, begin, size) + ```

                      Note that if no bounding box information is available, setting + `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + bounding box covering the whole image. If use_image_if_no_bounding_boxes is + false and no bounding boxes are supplied, an error is raised.

                      sampleDistortedBoundingBoxV2' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) 
                      => OpParams 
                      -> Tensor v'1 t

                      image_size: 1-D, containing `[height, width, channels]`.

                      -> Tensor v'2 Float

                      bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes + associated with the image.

                      -> Tensor v'3 Float

                      min_object_covered: The cropped area of the image must contain at least this + fraction of any bounding box supplied. The value of this parameter should be + non-negative. In the case of 0, the cropped area does not need to overlap + any of the bounding boxes supplied.

                      -> m' (Tensor Value t, Tensor Value t, Tensor Value Float)

                      (begin, size, bboxes)

                      • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to + `tf.slice`.
                      • size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to + `tf.slice`.
                      • bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. + Provide as input to `tf.image.draw_bounding_boxes`.

                      save Source #

                      Arguments

                      :: (MonadBuild m', TensorTypes t) 
                      => Tensor v'1 ByteString

                      filename: Must have a single element. The name of the file to which we write + the tensor.

                      -> Tensor v'2 ByteString

                      tensor_names: Shape `[N]`. The names of the tensors to be saved.

                      -> TensorList v'3 t

                      data: N tensors to save.

                      -> m' ControlNode 

                      Saves the input tensors to disk.

                      The size of tensor_names must match the number of tensors in `data`. `data[i]` + is written to filename with name `tensor_names[i]`.

                      See also SaveSlices.

                      save' Source #

                      Arguments

                      :: (MonadBuild m', TensorTypes t) 
                      => OpParams 
                      -> Tensor v'1 ByteString

                      filename: Must have a single element. The name of the file to which we write + the tensor.

                      -> Tensor v'2 ByteString

                      tensor_names: Shape `[N]`. The names of the tensors to be saved.

                      -> TensorList v'3 t

                      data: N tensors to save.

                      -> m' ControlNode 

                      saveSlices Source #

                      Arguments

                      :: (MonadBuild m', TensorTypes t) 
                      => Tensor v'1 ByteString

                      filename: Must have a single element. The name of the file to which we write the + tensor.

                      -> Tensor v'2 ByteString

                      tensor_names: Shape `[N]`. The names of the tensors to be saved.

                      -> Tensor v'3 ByteString

                      shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when + saving the tensors.

                      -> TensorList v'4 t

                      data: N tensors to save.

                      -> m' ControlNode 

                      Saves input tensors slices to disk.

                      This is like Save except that tensors can be listed in the saved file as being a slice of a larger tensor. shapes_and_slices specifies the shape of the larger tensor and the slice that this tensor covers. shapes_and_slices must have as many elements as tensor_names.

                      Elements of the shapes_and_slices input must either be:

                      • The empty string, in which case the corresponding tensor is saved normally.
                      • A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the dimI are the dimensions of the larger tensor and `slice-spec` specifies what part is covered by the tensor to save.

                      `slice-spec` itself is a :-separated list: `slice0:slice1:...:sliceN-1` - where each sliceI is either:

                      • The string - meaning that the slice covers all indices of this dimension
                      • `start,length` where start and length are integers. In that - case the slice covers length indices starting at start.

                      See also Save.

                      saveSlices'

                      Arguments

                      :: (MonadBuild m', TensorTypes t) 
                      => OpParams 
                      -> Tensor v'1 ByteString

                      filename: Must have a single element. The name of the file to which we write the - tensor.

                      -> Tensor v'2 ByteString

                      tensor_names: Shape `[N]`. The names of the tensors to be saved.

                      -> Tensor v'3 ByteString

                      shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when - saving the tensors.

                      -> TensorList v'4 t

                      data: N tensors to save.

                      -> m' ControlNode 

                      saveV2

                      Arguments

                      :: (MonadBuild m', TensorTypes dtypes) 
                      => Tensor v'1 ByteString

                      prefix: Must have a single element. The prefix of the V2 checkpoint to which we - write the tensors.

                      -> Tensor v'2 ByteString

                      tensor_names: shape {N}. The names of the tensors to be saved.

                      -> Tensor v'3 ByteString

                      shape_and_slices: shape {N}. The slice specs of the tensors to be saved. - Empty strings indicate that they are non-partitioned tensors.

                      -> TensorList v'4 dtypes

                      tensors: N tensors to save.

                      -> m' ControlNode 

                      Saves tensors in V2 checkpoint format.

                      By default, saves the named tensors in full. If the caller wishes to save + where each sliceI is either:

                      • The string - meaning that the slice covers all indices of this dimension
                      • `start,length` where start and length are integers. In that + case the slice covers length indices starting at start.

                      See also Save.

                      saveSlices' Source #

                      Arguments

                      :: (MonadBuild m', TensorTypes t) 
                      => OpParams 
                      -> Tensor v'1 ByteString

                      filename: Must have a single element. The name of the file to which we write the + tensor.

                      -> Tensor v'2 ByteString

                      tensor_names: Shape `[N]`. The names of the tensors to be saved.

                      -> Tensor v'3 ByteString

                      shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when + saving the tensors.

                      -> TensorList v'4 t

                      data: N tensors to save.

                      -> m' ControlNode 

                      saveV2 Source #

                      Arguments

                      :: (MonadBuild m', TensorTypes dtypes) 
                      => Tensor v'1 ByteString

                      prefix: Must have a single element. The prefix of the V2 checkpoint to which we + write the tensors.

                      -> Tensor v'2 ByteString

                      tensor_names: shape {N}. The names of the tensors to be saved.

                      -> Tensor v'3 ByteString

                      shape_and_slices: shape {N}. The slice specs of the tensors to be saved. + Empty strings indicate that they are non-partitioned tensors.

                      -> TensorList v'4 dtypes

                      tensors: N tensors to save.

                      -> m' ControlNode 

                      Saves tensors in V2 checkpoint format.

                      By default, saves the named tensors in full. If the caller wishes to save specific slices of full tensors, "shape_and_slices" should be non-empty strings - and correspondingly well-formed.

                      saveV2'

                      Arguments

                      :: (MonadBuild m', TensorTypes dtypes) 
                      => OpParams 
                      -> Tensor v'1 ByteString

                      prefix: Must have a single element. The prefix of the V2 checkpoint to which we - write the tensors.

                      -> Tensor v'2 ByteString

                      tensor_names: shape {N}. The names of the tensors to be saved.

                      -> Tensor v'3 ByteString

                      shape_and_slices: shape {N}. The slice specs of the tensors to be saved. - Empty strings indicate that they are non-partitioned tensors.

                      -> TensorList v'4 dtypes

                      tensors: N tensors to save.

                      -> m' ControlNode 

                      scalarSummary

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => Tensor v'1 ByteString

                      tags: Tags for the summary.

                      -> Tensor v'2 t

                      values: Same shape as `tags. Values for the summary.

                      -> Tensor Build ByteString

                      summary: Scalar. Serialized Summary protocol buffer.

                      Outputs a Summary protocol buffer with scalar values.

                      The input tags and values must have the same shape. The generated summary - has a summary value for each tag-value pair in tags and values.

                      scalarSummary'

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 ByteString

                      tags: Tags for the summary.

                      -> Tensor v'2 t

                      values: Same shape as `tags. Values for the summary.

                      -> Tensor Build ByteString

                      summary: Scalar. Serialized Summary protocol buffer.

                      scatterAdd

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + and correspondingly well-formed.

                      saveV2' Source #

                      Arguments

                      :: (MonadBuild m', TensorTypes dtypes) 
                      => OpParams 
                      -> Tensor v'1 ByteString

                      prefix: Must have a single element. The prefix of the V2 checkpoint to which we + write the tensors.

                      -> Tensor v'2 ByteString

                      tensor_names: shape {N}. The names of the tensors to be saved.

                      -> Tensor v'3 ByteString

                      shape_and_slices: shape {N}. The slice specs of the tensors to be saved. + Empty strings indicate that they are non-partitioned tensors.

                      -> TensorList v'4 dtypes

                      tensors: N tensors to save.

                      -> m' ControlNode 

                      scalarSummary Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 ByteString

                      tags: Tags for the summary.

                      -> Tensor v'2 t

                      values: Same shape as `tags. Values for the summary.

                      -> Tensor Build ByteString

                      summary: Scalar. Serialized Summary protocol buffer.

                      Outputs a Summary protocol buffer with scalar values.

                      The input tags and values must have the same shape. The generated summary + has a summary value for each tag-value pair in tags and values.

                      scalarSummary' Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 ByteString

                      tags: Tags for the summary.

                      -> Tensor v'2 t

                      values: Same shape as `tags. Values for the summary.

                      -> Tensor Build ByteString

                      summary: Scalar. Serialized Summary protocol buffer.

                      scatterAdd Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

                      Adds sparse updates to a variable reference.

                      This operation computes

                      # Scalar indices ref[indices, ...] += updates[...]

                      # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...]

                      # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]

                      This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

                      Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions add.

                      Requires `updates.shape = indices.shape + ref.shape[1:]`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/ScatterAdd.png" alt - /div

                      scatterAdd'

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      scatterDiv

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of values that ref is divided by.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      Divides a variable reference by sparse updates.

                      This operation computes

                      # Scalar indices + style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt + /div

                      scatterAdd' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      scatterDiv Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of values that ref is divided by.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      Divides a variable reference by sparse updates.

                      This operation computes

                      ```python + # Scalar indices ref[indices, ...] /= updates[...]

                      # Vector indices (for each i) ref[indices[i], ...] /= updates[i, ...]

                      # High rank indices (for each i, ..., j) - ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]

                      This operation outputs ref after the update is done. + ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + ```

                      This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

                      Duplicate entries are handled correctly: if multiple indices reference - the same location, their contributions divide.

                      Requires `updates.shape = indices.shape + ref.shape[1:]`.

                      scatterDiv'

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of values that ref is divided by.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      scatterMul

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to multiply to ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      Multiplies sparse updates into a variable reference.

                      This operation computes

                      # Scalar indices + the same location, their contributions divide.

                      Requires `updates.shape = indices.shape + ref.shape[1:]`.

                      scatterDiv' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of values that ref is divided by.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      scatterMul Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to multiply to ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      Multiplies sparse updates into a variable reference.

                      This operation computes

                      ```python + # Scalar indices ref[indices, ...] *= updates[...]

                      # Vector indices (for each i) ref[indices[i], ...] *= updates[i, ...]

                      # High rank indices (for each i, ..., j) - ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]

                      This operation outputs ref after the update is done. + ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + ```

                      This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

                      Duplicate entries are handled correctly: if multiple indices reference - the same location, their contributions multiply.

                      Requires `updates.shape = indices.shape + ref.shape[1:]`.

                      scatterMul'

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to multiply to ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      scatterNd

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor v'1 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

                      -> Tensor v'2 t

                      updates: A Tensor. Must have the same type as tensor. A tensor of updated values - to store in ref.

                      -> Tensor v'3 tindices

                      shape: A vector. The shape of the resulting tensor.

                      -> Tensor Build t

                      output: A new tensor with the given shape and updates applied according - to the indices.

                      Creates a new tensor by applying sparse updates to individual

                      values or slices within a zero tensor of the given shape tensor according to - indices. This operator is the inverse of the tf.gather_nd - operator which extracts values or slices from a given tensor.

                      TODO(simister): Add a link to Variable.getitem documentation on slice - syntax.

                      shape is a TensorShape with rank P and indices is a Tensor of rank - Q.

                      indices must be integer tensor, containing indices into shape. - It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

                      The innermost dimension of indices (with length K) corresponds to - indices into elements (if `K = P`) or slices (if `K < P`) along the Kth - dimension of shape.

                      updates is Tensor of rank `Q-1+P-K` with shape:

                      ``` - [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]]. - ```

                      The simplest form of scatter is to insert individual elements in a tensor by + the same location, their contributions multiply.

                      Requires `updates.shape = indices.shape + ref.shape[1:]`.

                      scatterMul' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to multiply to ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      scatterNd Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] tindices) 
                      => Tensor v'1 tindices

                      indices: Index tensor.

                      -> Tensor v'2 t

                      updates: Updates to scatter into output.

                      -> Tensor v'3 tindices

                      shape: 1-D. The shape of the resulting tensor.

                      -> Tensor Build t

                      output: A new tensor with the given shape and updates applied according + to the indices.

                      Scatter updates into a new (initially zero) tensor according to indices.

                      Creates a new tensor by applying sparse updates to individual + values or slices within a zero tensor of the given shape according to + indices. This operator is the inverse of the @{tf.gather_nd} operator which + extracts values or slices from a given tensor.

                      • *WARNING**: The order in which updates are applied is nondeterministic, so the + output will be nondeterministic if indices contains duplicates.

                      indices is an integer tensor containing indices into a new tensor of shape + shape. The last dimension of indices can be at most the rank of shape:

                      indices.shape[-1] <= shape.rank

                      The last dimension of indices corresponds to indices into elements + (if `indices.shape[-1] = shape.rank`) or slices + (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + shape. updates is a tensor with shape

                      indices.shape[:-1] + shape[indices.shape[-1]:]

                      The simplest form of scatter is to insert individual elements in a tensor by index. For example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/ScatterNd1.png" alt - /div

                      In Python, this scatter operation would look like this:

                      indices = tf.constant([[4], [3], [1], [7]]) + style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt + /div

                      In Python, this scatter operation would look like this:

                      ```python + indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) shape = tf.constant([8]) scatter = tf.scatter_nd(indices, updates, shape) with tf.Session() as sess: - print sess.run(scatter)

                      The resulting tensor would look like this:

                      0, 11, 0, 10, 9, 0, 0, 12

                      We can also, insert entire slices of a higher rank tensor all at once. For + print(sess.run(scatter)) + ```

                      The resulting tensor would look like this:

                      0, 11, 0, 10, 9, 0, 0, 12

                      We can also, insert entire slices of a higher rank tensor all at once. For example, if we wanted to insert two slices in the first dimension of a rank-3 tensor with two matrices of new values.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/ScatterNd2.png" alt - /div

                      In Python, this scatter operation would look like this:

                      indices = tf.constant([[0], [2]]) + style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt + /div

                      In Python, this scatter operation would look like this:

                      ```python + indices = tf.constant([[0], [2]]) updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[5, 5, 5, 5], [6, 6, 6, 6], @@ -2506,16 +2919,15 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core shape = tf.constant([4, 4, 4]) scatter = tf.scatter_nd(indices, updates, shape) with tf.Session() as sess: - print sess.run(scatter)

                      The resulting tensor would look like this:

                      [[5, 5, 5, 5
                      , [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                      [0, 0, 0, 0
                      , [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
                      [5, 5, 5, 5
                      , [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                      [0, 0, 0, 0
                      , [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]

                      scatterNd'

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor v'1 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

                      -> Tensor v'2 t

                      updates: A Tensor. Must have the same type as tensor. A tensor of updated values - to store in ref.

                      -> Tensor v'3 tindices

                      shape: A vector. The shape of the resulting tensor.

                      -> Tensor Build t

                      output: A new tensor with the given shape and updates applied according - to the indices.

                      scatterNdAdd

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values - to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      Applies sparse addition between updates and individual values or slices

                      within a given variable according to indices.

                      ref is a Tensor with rank P and indices is a Tensor of rank Q.

                      indices must be integer tensor, containing indices into ref. + print(sess.run(scatter)) + ```

                      The resulting tensor would look like this:

                      [[5, 5, 5, 5
                      , [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                      [0, 0, 0, 0
                      , [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
                      [5, 5, 5, 5
                      , [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                      [0, 0, 0, 0
                      , [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]

                      scatterNd' Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor v'1 tindices

                      indices: Index tensor.

                      -> Tensor v'2 t

                      updates: Updates to scatter into output.

                      -> Tensor v'3 tindices

                      shape: 1-D. The shape of the resulting tensor.

                      -> Tensor Build t

                      output: A new tensor with the given shape and updates applied according + to the indices.

                      scatterNdAdd Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. + A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values + to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      Applies sparse addition between updates and individual values or slices

                      within a given variable according to indices.

                      ref is a Tensor with rank P and indices is a Tensor of rank Q.

                      indices must be integer tensor, containing indices into ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

                      The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth - dimension of ref.

                      updates is Tensor of rank `Q-1+P-K` with shape:

                      ``` + dimension of ref.

                      updates is Tensor of rank `Q-1+P-K` with shape:

                      ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```

                      For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that addition would look like this:

                      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) @@ -2523,17 +2935,37 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core updates = tf.constant([9, 10, 11, 12]) add = tf.scatter_nd_add(ref, indices, updates) with tf.Session() as sess: - print sess.run(add)

                      The resulting update to ref would look like this:

                      1, 13, 3, 14, 14, 6, 7, 20

                      See tf.scatter_nd for more details about how to make updates to - slices.

                      scatterNdAdd'

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values - to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      scatterNdSub

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values - to subtract from ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      Applies sparse subtraction between updates and individual values or slices

                      within a given variable according to indices.

                      ref is a Tensor with rank P and indices is a Tensor of rank Q.

                      indices must be integer tensor, containing indices into ref. + print sess.run(add)

                      The resulting update to ref would look like this:

                      1, 13, 3, 14, 14, 6, 7, 20

                      See @{tf.scatter_nd} for more details about how to make updates to + slices.

                      scatterNdAdd' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. + A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values + to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      scatterNdNonAliasingAdd Source #

                      Arguments

                      :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor v'1 t

                      input: A Tensor.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. + A tensor of indices into input.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values + to add to input.

                      -> Tensor Build t

                      output: A Tensor with the same shape as input, containing values of input + updated with updates.

                      Applies sparse addition to input using individual values or slices

                      from updates according to indices indices. The updates are non-aliasing: + input is only modified in-place if no other operations will use it. + Otherwise, a copy of input is made. This operation has a gradient with + respect to both input and updates.

                      input is a Tensor with rank P and indices is a Tensor of rank Q.

                      indices must be integer tensor, containing indices into input. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

                      The innermost dimension of indices (with length K) corresponds to + indices into elements (if `K = P`) or `(P-K)`-dimensional slices + (if `K < P`) along the Kth dimension of input.

                      updates is Tensor of rank `Q-1+P-K` with shape:

                      ``` + [d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]]. + ```

                      For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + elements. In Python, that addition would look like this:

                      input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1], [7]]) + updates = tf.constant([9, 10, 11, 12]) + output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + with tf.Session() as sess: + print(sess.run(output))

                      The resulting value output would look like this:

                      1, 13, 3, 14, 14, 6, 7, 20

                      See @{tf.scatter_nd} for more details about how to make updates to slices.

                      scatterNdNonAliasingAdd' Source #

                      Arguments

                      :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      input: A Tensor.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. + A tensor of indices into input.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values + to add to input.

                      -> Tensor Build t

                      output: A Tensor with the same shape as input, containing values of input + updated with updates.

                      scatterNdSub Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. + A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values + to subtract from ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      Applies sparse subtraction between updates and individual values or slices

                      within a given variable according to indices.

                      ref is a Tensor with rank P and indices is a Tensor of rank Q.

                      indices must be integer tensor, containing indices into ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

                      The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth - dimension of ref.

                      updates is Tensor of rank `Q-1+P-K` with shape:

                      ``` + dimension of ref.

                      updates is Tensor of rank `Q-1+P-K` with shape:

                      ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```

                      For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:

                      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) @@ -2541,131 +2973,129 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core updates = tf.constant([9, 10, 11, 12]) sub = tf.scatter_nd_sub(ref, indices, updates) with tf.Session() as sess: - print sess.run(sub)

                      The resulting update to ref would look like this:

                      1, -9, 3, -6, -4, 6, 7, -4

                      See tf.scatter_nd for more details about how to make updates to - slices.

                      scatterNdSub'

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values - to subtract from ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      scatterNdUpdate

                      Arguments

                      :: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated - values to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want to - use the updated values after the update is done.

                      Applies sparse updates to individual values or slices within a given

                      variable according to indices.

                      ref is a Tensor with rank P and indices is a Tensor of rank Q.

                      indices must be integer tensor, containing indices into ref. + print sess.run(sub)

                      The resulting update to ref would look like this:

                      1, -9, 3, -6, -4, 6, 7, -4

                      See @{tf.scatter_nd} for more details about how to make updates to + slices.

                      scatterNdSub' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. + A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated values + to subtract from ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      scatterNdUpdate Source #

                      Arguments

                      :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) 
                      => Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. + A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated + values to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want to + use the updated values after the update is done.

                      Applies sparse updates to individual values or slices within a given

                      variable according to indices.

                      ref is a Tensor with rank P and indices is a Tensor of rank Q.

                      indices must be integer tensor, containing indices into ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

                      The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth - dimension of ref.

                      updates is Tensor of rank `Q-1+P-K` with shape:

                      ``` + dimension of ref.

                      updates is Tensor of rank `Q-1+P-K` with shape:

                      ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```

                      For example, say we want to update 4 scattered elements to a rank-1 tensor to - 8 elements. In Python, that update would look like this:

                      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + 8 elements. In Python, that update would look like this:

                      ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) update = tf.scatter_nd_update(ref, indices, updates) with tf.Session() as sess: - print sess.run(update)

                      The resulting update to ref would look like this:

                      1, 11, 3, 10, 9, 6, 7, 12

                      See tf.scatter_nd for more details about how to make updates to - slices.

                      scatterNdUpdate'

                      Arguments

                      :: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated - values to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want to - use the updated values after the update is done.

                      scatterSub

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to subtract from ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      Subtracts sparse updates to a variable reference.

                      # Scalar indices + print sess.run(update) + ```

                      The resulting update to ref would look like this:

                      1, 11, 3, 10, 9, 6, 7, 12

                      See @{tf.scatter_nd} for more details about how to make updates to + slices.

                      scatterNdUpdate' Source #

                      Arguments

                      :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: A mutable Tensor. Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A Tensor. Must be one of the following types: int32, int64. + A tensor of indices into ref.

                      -> Tensor v'3 t

                      updates: A Tensor. Must have the same type as ref. A tensor of updated + values to add to ref.

                      -> m' (Tensor Ref t)

                      output_ref: Same as ref. Returned as a convenience for operations that want to + use the updated values after the update is done.

                      scatterSub Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to subtract from ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      Subtracts sparse updates to a variable reference.

                      ```python + # Scalar indices ref[indices, ...] -= updates[...]

                      # Vector indices (for each i) ref[indices[i], ...] -= updates[i, ...]

                      # High rank indices (for each i, ..., j) - ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]

                      This operation outputs ref after the update is done. + ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + ```

                      This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

                      Duplicate entries are handled correctly: if multiple indices reference the same location, their (negated) contributions add.

                      Requires `updates.shape = indices.shape + ref.shape[1:]`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/ScatterSub.png" alt - /div

                      scatterSub'

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to subtract from ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      scatterUpdate

                      Arguments

                      :: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to store in ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      Applies sparse updates to a variable reference.

                      This operation computes

                      # Scalar indices + style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt + /div

                      scatterSub' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to subtract from ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      scatterUpdate Source #

                      Arguments

                      :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) 
                      => Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to store in ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                      Applies sparse updates to a variable reference.

                      This operation computes

                      ```python + # Scalar indices ref[indices, ...] = updates[...]

                      # Vector indices (for each i) ref[indices[i], ...] = updates[i, ...]

                      # High rank indices (for each i, ..., j) - ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]

                      This operation outputs ref after the update is done. + ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + ```

                      This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

                      If values in ref is to be updated more than once, because there are duplicate entries in indices, the order at which the updates happen for each value is undefined.

                      Requires `updates.shape = indices.shape + ref.shape[1:]`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/ScatterUpdate.png" alt - /div

                      scatterUpdate'

                      Arguments

                      :: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor Ref t

                      ref: Should be from a Variable node.

                      -> Tensor v'2 tindices

                      indices: A tensor of indices into the first dimension of ref.

                      -> Tensor v'3 t

                      updates: A tensor of updated values to store in ref.

                      -> m' (Tensor Ref t)

                      output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

                      sdcaFprint

                      Arguments

                      :: Tensor v'1 ByteString

                      input: vector of strings to compute fingerprints on.

                      -> Tensor Build Int64

                      output: a (N,2) shaped matrix where N is the number of elements in the input - vector. Each row contains the low and high parts of the fingerprint.

                      Computes fingerprints of the input strings.

                      sdcaFprint'

                      Arguments

                      :: OpParams 
                      -> Tensor v'1 ByteString

                      input: vector of strings to compute fingerprints on.

                      -> Tensor Build Int64

                      output: a (N,2) shaped matrix where N is the number of elements in the input - vector. Each row contains the low and high parts of the fingerprint.

                      sdcaOptimizer

                      Arguments

                      :: Float

                      l1: Symmetric l1 regularization strength.

                      -> Float

                      l2: Symmetric l2 regularization strength.

                      -> Int64

                      num_inner_iterations: Number of iterations per mini-batch.

                      -> Int64

                      num_loss_partitions: Number of partitions of the global loss function.

                      -> [Tensor v'1 Int64]

                      sparse_example_indices: a list of vectors which contain example indices.

                      -> [Tensor v'2 Int64]

                      sparse_feature_indices: a list of vectors which contain feature indices.

                      -> [Tensor v'3 Float]

                      sparse_feature_values: a list of vectors which contains feature value - associated with each feature group.

                      -> [Tensor v'4 Float]

                      dense_features: a list of matrices which contains the dense feature values.

                      -> Tensor v'5 Float

                      example_weights: a vector which contains the weight associated with each - example.

                      -> Tensor v'6 Float

                      example_labels: a vector which contains the label/target associated with each - example.

                      -> [Tensor v'7 Int64]

                      sparse_indices: a list of vectors where each value is the indices which has - corresponding weights in sparse_weights. This field maybe ommitted for the - dense approach.

                      -> [Tensor v'8 Float]

                      sparse_weights: a list of vectors where each value is the weight associated with - a sparse feature group.

                      -> [Tensor v'9 Float]

                      dense_weights: a list of vectors where the values are the weights associated - with a dense feature group.

                      -> Tensor v'10 Float

                      example_state_data: a list of vectors containing the example state data.

                      -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])

                      (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)

                      • out_example_state_data: a list of vectors containing the updated example state + style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt + /div

                        scatterUpdate' Source #

                        Arguments

                        :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) 
                        => OpParams 
                        -> Tensor Ref t

                        ref: Should be from a Variable node.

                        -> Tensor v'2 tindices

                        indices: A tensor of indices into the first dimension of ref.

                        -> Tensor v'3 t

                        updates: A tensor of updated values to store in ref.

                        -> m' (Tensor Ref t)

                        output_ref: = Same as ref. Returned as a convenience for operations that want + to use the updated values after the update is done.

                        sdcaFprint Source #

                        Arguments

                        :: Tensor v'1 ByteString

                        input: vector of strings to compute fingerprints on.

                        -> Tensor Build Int64

                        output: a (N,2) shaped matrix where N is the number of elements in the input + vector. Each row contains the low and high parts of the fingerprint.

                        Computes fingerprints of the input strings.

                        sdcaFprint' Source #

                        Arguments

                        :: OpParams 
                        -> Tensor v'1 ByteString

                        input: vector of strings to compute fingerprints on.

                        -> Tensor Build Int64

                        output: a (N,2) shaped matrix where N is the number of elements in the input + vector. Each row contains the low and high parts of the fingerprint.

                        Objective = sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$

                        Adding vs. Averaging in Distributed Primal-Dual Optimization.br + Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, + Peter Richtarik, Martin Takac. 2015

                        Stochastic Dual Coordinate Ascent with Adaptive Probabilities.br + Dominik Csiba, Zheng Qu, Peter Richtarik. 2015

                        sdcaOptimizer Source #

                        Arguments

                        :: Float

                        l1: Symmetric l1 regularization strength.

                        -> Float

                        l2: Symmetric l2 regularization strength.

                        -> Int64

                        num_inner_iterations: Number of iterations per mini-batch.

                        -> Int64

                        num_loss_partitions: Number of partitions of the global loss function.

                        -> [Tensor v'1 Int64]

                        sparse_example_indices: a list of vectors which contain example indices.

                        -> [Tensor v'2 Int64]

                        sparse_feature_indices: a list of vectors which contain feature indices.

                        -> [Tensor v'3 Float]

                        sparse_feature_values: a list of vectors which contains feature value + associated with each feature group.

                        -> [Tensor v'4 Float]

                        dense_features: a list of matrices which contains the dense feature values.

                        -> Tensor v'5 Float

                        example_weights: a vector which contains the weight associated with each + example.

                        -> Tensor v'6 Float

                        example_labels: a vector which contains the label/target associated with each + example.

                        -> [Tensor v'7 Int64]

                        sparse_indices: a list of vectors where each value is the indices which has + corresponding weights in sparse_weights. This field maybe omitted for the + dense approach.

                        -> [Tensor v'8 Float]

                        sparse_weights: a list of vectors where each value is the weight associated with + a sparse feature group.

                        -> [Tensor v'9 Float]

                        dense_weights: a list of vectors where the values are the weights associated + with a dense feature group.

                        -> Tensor v'10 Float

                        example_state_data: a list of vectors containing the example state data.

                        -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])

                        (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)

                        • out_example_state_data: a list of vectors containing the updated example state data.
                        • out_delta_sparse_weights: a list of vectors where each value is the delta weights associated with a sparse feature group.
                        • out_delta_dense_weights: a list of vectors where the values are the delta - weights associated with a dense feature group.

                        Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for

                        linear models with L1 + L2 regularization. As global optimization objective is - strongly-convex, the optimizer optimizes the dual objective at each step. The - optimizer applies each update one example at a time. Examples are sampled - uniformly, and the optimizer is learning rate free and enjoys linear convergence - rate.

                        Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; Zhang, Tong. - 2012 arXiv1211.2717S: http://arxiv.org/pdf/1211.2717v1.pdf

                        Loss objective = sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|

                        Adding vs. Averaging in Distributed Primal-Dual Optimization. - Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, - Martin Takac http://arxiv.org/abs/1502.03508

                        Stochastic Dual Coordinate Ascent with Adaptive Probabilities - Dominik Csiba, Zheng Qu, Peter Richtarik https://arxiv.org/abs/1502.08053

                        sdcaOptimizer'

                        Arguments

                        :: OpParams 
                        -> Float

                        l1: Symmetric l1 regularization strength.

                        -> Float

                        l2: Symmetric l2 regularization strength.

                        -> Int64

                        num_inner_iterations: Number of iterations per mini-batch.

                        -> Int64

                        num_loss_partitions: Number of partitions of the global loss function.

                        -> [Tensor v'1 Int64]

                        sparse_example_indices: a list of vectors which contain example indices.

                        -> [Tensor v'2 Int64]

                        sparse_feature_indices: a list of vectors which contain feature indices.

                        -> [Tensor v'3 Float]

                        sparse_feature_values: a list of vectors which contains feature value - associated with each feature group.

                        -> [Tensor v'4 Float]

                        dense_features: a list of matrices which contains the dense feature values.

                        -> Tensor v'5 Float

                        example_weights: a vector which contains the weight associated with each - example.

                        -> Tensor v'6 Float

                        example_labels: a vector which contains the label/target associated with each - example.

                        -> [Tensor v'7 Int64]

                        sparse_indices: a list of vectors where each value is the indices which has - corresponding weights in sparse_weights. This field maybe ommitted for the - dense approach.

                        -> [Tensor v'8 Float]

                        sparse_weights: a list of vectors where each value is the weight associated with - a sparse feature group.

                        -> [Tensor v'9 Float]

                        dense_weights: a list of vectors where the values are the weights associated - with a dense feature group.

                        -> Tensor v'10 Float

                        example_state_data: a list of vectors containing the example state data.

                        -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])

                        (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)

                        • out_example_state_data: a list of vectors containing the updated example state + weights associated with a dense feature group.

                        sdcaOptimizer' Source #

                        Arguments

                        :: OpParams 
                        -> Float

                        l1: Symmetric l1 regularization strength.

                        -> Float

                        l2: Symmetric l2 regularization strength.

                        -> Int64

                        num_inner_iterations: Number of iterations per mini-batch.

                        -> Int64

                        num_loss_partitions: Number of partitions of the global loss function.

                        -> [Tensor v'1 Int64]

                        sparse_example_indices: a list of vectors which contain example indices.

                        -> [Tensor v'2 Int64]

                        sparse_feature_indices: a list of vectors which contain feature indices.

                        -> [Tensor v'3 Float]

                        sparse_feature_values: a list of vectors which contains feature value + associated with each feature group.

                        -> [Tensor v'4 Float]

                        dense_features: a list of matrices which contains the dense feature values.

                        -> Tensor v'5 Float

                        example_weights: a vector which contains the weight associated with each + example.

                        -> Tensor v'6 Float

                        example_labels: a vector which contains the label/target associated with each + example.

                        -> [Tensor v'7 Int64]

                        sparse_indices: a list of vectors where each value is the indices which has + corresponding weights in sparse_weights. This field maybe omitted for the + dense approach.

                        -> [Tensor v'8 Float]

                        sparse_weights: a list of vectors where each value is the weight associated with + a sparse feature group.

                        -> [Tensor v'9 Float]

                        dense_weights: a list of vectors where the values are the weights associated + with a dense feature group.

                        -> Tensor v'10 Float

                        example_state_data: a list of vectors containing the example state data.

                        -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])

                        (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)

                        • out_example_state_data: a list of vectors containing the updated example state data.
                        • out_delta_sparse_weights: a list of vectors where each value is the delta weights associated with a sparse feature group.
                        • out_delta_dense_weights: a list of vectors where the values are the delta - weights associated with a dense feature group.

                        sdcaShrinkL1

                        Arguments

                        :: MonadBuild m' 
                        => Float

                        l1: Symmetric l1 regularization strength.

                        -> Float

                        l2: Symmetric l2 regularization strength. Should be a positive float.

                        -> [Tensor Ref Float]

                        weights: a list of vectors where each value is the weight associated with a - feature group.

                        -> m' ControlNode 

                        Applies L1 regularization shrink step on the parameters.

                        sdcaShrinkL1'

                        Arguments

                        :: MonadBuild m' 
                        => OpParams 
                        -> Float

                        l1: Symmetric l1 regularization strength.

                        -> Float

                        l2: Symmetric l2 regularization strength. Should be a positive float.

                        -> [Tensor Ref Float]

                        weights: a list of vectors where each value is the weight associated with a - feature group.

                        -> m' ControlNode 

                        segmentMax

                        Arguments

                        :: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                        => Tensor v'1 t

                        data

                        -> Tensor v'2 tindices

                        segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                        -> Tensor Build t

                        output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                        Computes the maximum along segments of a tensor.

                        Read the section on Segmentation - for an explanation of segments.

                        Computes a tensor such that - \(output_i = max_j(data_j)\) where max is over j such - that `segment_ids[j] == i`.

                        style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentMax.png" alt - /div

                        segmentMax'

                        Arguments

                        :: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                        => OpParams 
                        -> Tensor v'1 t

                        data

                        -> Tensor v'2 tindices

                        segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                        -> Tensor Build t

                        output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                        segmentMean

                        Arguments

                        :: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                        => Tensor v'1 t

                        data

                        -> Tensor v'2 tindices

                        segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                        -> Tensor Build t

                        output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                        Computes the mean along segments of a tensor.

                        Read the section on - Segmentation for an explanation - of segments.

                        Computes a tensor such that + weights associated with a dense feature group.

                      sdcaShrinkL1 Source #

                      Arguments

                      :: MonadBuild m' 
                      => Float

                      l1: Symmetric l1 regularization strength.

                      -> Float

                      l2: Symmetric l2 regularization strength. Should be a positive float.

                      -> [Tensor Ref Float]

                      weights: a list of vectors where each value is the weight associated with a + feature group.

                      -> m' ControlNode 

                      Applies L1 regularization shrink step on the parameters.

                      sdcaShrinkL1' Source #

                      Arguments

                      :: MonadBuild m' 
                      => OpParams 
                      -> Float

                      l1: Symmetric l1 regularization strength.

                      -> Float

                      l2: Symmetric l2 regularization strength. Should be a positive float.

                      -> [Tensor Ref Float]

                      weights: a list of vectors where each value is the weight associated with a + feature group.

                      -> m' ControlNode 

                      segmentMax Source #

                      Arguments

                      :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      Computes the maximum along segments of a tensor.

                      Read @{$math_ops#segmentation$the section on segmentation} for an explanation of + segments.

                      Computes a tensor such that + \(output_i = max_j(data_j)\) where max is over j such + that `segment_ids[j] == i`.

                      If the max is empty for a given segment ID i, `output[i] = 0`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt + /div

                      segmentMax' Source #

                      Arguments

                      :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      segmentMean Source #

                      Arguments

                      :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      Computes the mean along segments of a tensor.

                      Read @{$math_ops#segmentation$the section on segmentation} for an explanation of + segments.

                      Computes a tensor such that \(output_i = frac{sum_j data_j}{N}\) where mean is over j such that `segment_ids[j] == i` and N is the total number of - values summed.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentMean.png" alt - /div

                      segmentMean'

                      Arguments

                      :: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                      segmentMin

                      Arguments

                      :: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                      Computes the minimum along segments of a tensor.

                      Read the section on - Segmentation for an explanation - of segments.

                      Computes a tensor such that - \(output_i = min_j(data_j)\) where min is over j such - that `segment_ids[j] == i`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentMin.png" alt - /div

                      segmentMin'

                      Arguments

                      :: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                      segmentProd

                      Arguments

                      :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                      Computes the product along segments of a tensor.

                      Read the section on - Segmentation for an explanation - of segments.

                      Computes a tensor such that + values summed.

                      If the mean is empty for a given segment ID i, `output[i] = 0`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt + /div

                      segmentMean' Source #

                      Arguments

                      :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      segmentMin Source #

                      Arguments

                      :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      Computes the minimum along segments of a tensor.

                      Read @{$math_ops#segmentation$the section on segmentation} for an explanation of + segments.

                      Computes a tensor such that + \(output_i = min_j(data_j)\) where min is over j such + that `segment_ids[j] == i`.

                      If the min is empty for a given segment ID i, `output[i] = 0`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt + /div

                      segmentMin' Source #

                      Arguments

                      :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      segmentProd Source #

                      Arguments

                      :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      Computes the product along segments of a tensor.

                      Read @{$math_ops#segmentation$the section on segmentation} for an explanation of + segments.

                      Computes a tensor such that \(output_i = prod_j data_j\) where the product is over j such - that `segment_ids[j] == i`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentProd.png" alt - /div

                      segmentProd'

                      Arguments

                      :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                      segmentSum

                      Arguments

                      :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                      Computes the sum along segments of a tensor.

                      Read the section on Segmentation - for an explanation of segments.

                      Computes a tensor such that + that `segment_ids[j] == i`.

                      If the product is empty for a given segment ID i, `output[i] = 1`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt + /div

                      segmentProd' Source #

                      Arguments

                      :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      segmentSum Source #

                      Arguments

                      :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      Computes the sum along segments of a tensor.

                      Read @{$math_ops#segmentation$the section on segmentation} for an explanation of + segments.

                      Computes a tensor such that \(output_i = sum_j data_j\) where sum is over j such - that `segment_ids[j] == i`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentSum.png" alt - /div

                      segmentSum'

                      Arguments

                      :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s - first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                      select

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Bool

                      condition

                      -> Tensor v'2 t

                      t: = A Tensor which may have the same shape as condition. + that `segment_ids[j] == i`.

                      If the sum is empty for a given segment ID i, `output[i] = 0`.

                      style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt + /div

                      segmentSum' Source #

                      Arguments

                      :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                      => OpParams 
                      -> Tensor v'1 t

                      data

                      -> Tensor v'2 tindices

                      segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension. Values should be sorted and can be repeated.

                      -> Tensor Build t

                      output: Has same shape as data, except for dimension 0 which + has size k, the number of segments.

                      select Source #

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Bool

                      condition

                      -> Tensor v'2 t

                      t: = A Tensor which may have the same shape as condition. If condition is rank 1, t may have higher rank, - but its first dimension must match the size of condition.

                      -> Tensor v'3 t

                      e: = A Tensor with the same type and shape as t.

                      -> Tensor Build t

                      output: = A Tensor with the same type and shape as t and e.

                      Selects elements from t or e, depending on condition.

                      The t, and e tensors must all have the same shape, and the + but its first dimension must match the size of condition.

                      -> Tensor v'3 t

                      e: = A Tensor with the same type and shape as t.

                      -> Tensor Build t

                      output: = A Tensor with the same type and shape as t and e.

                      Selects elements from t or e, depending on condition.

                      The t, and e tensors must all have the same shape, and the output will also have that shape.

                      The condition tensor must be a scalar if t and e are scalars. If t and e are vectors or higher rank, then condition must be either a scalar, a vector with size matching the first dimension of t, or must have @@ -2674,136 +3104,145 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core taken from t (if true) or e (if false).

                      If condition is a vector and t and e are higher rank matrices, then it chooses which row (outer dimension) to copy from t and e. If condition has the same shape as t and e, then it chooses which - element to copy from t and e.

                      For example:

                      ```prettyprint + element to copy from t and e.

                      For example:

                      ```python # condition tensor is [[True, False] # [False, True]] # t is [[1, 2], # [3, 4]] # e is [[5, 6], # [7, 8]] - select(condition, t, e) ==> [[1, 6], - [7, 4]]

                      # condition tensor is [True, False] + select(condition, t, e) # => [[1, 6], [7, 4]]

                      # condition tensor is [True, False] # t is [[1, 2], # [3, 4]] # e is [[5, 6], # [7, 8]] select(condition, t, e) ==> [[1, 2], - [7, 8]]

                      ```

                      select'

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Tensor v'1 Bool

                      condition

                      -> Tensor v'2 t

                      t: = A Tensor which may have the same shape as condition. + [7, 8]]

                      ```

                      select' Source #

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Tensor v'1 Bool

                      condition

                      -> Tensor v'2 t

                      t: = A Tensor which may have the same shape as condition. If condition is rank 1, t may have higher rank, - but its first dimension must match the size of condition.

                      -> Tensor v'3 t

                      e: = A Tensor with the same type and shape as t.

                      -> Tensor Build t

                      output: = A Tensor with the same type and shape as t and e.

                      selfAdjointEig

                      Arguments

                      :: OneOf `[Double, Float]` t 
                      => Tensor v'1 t

                      input: Shape is `[..., M, M]`.

                      -> Tensor Build t

                      output: Shape is `[..., M+1, M]`.

                      Computes the Eigen Decomposition of a batch of square self-adjoint matrices.

                      The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + but its first dimension must match the size of condition.

                      -> Tensor v'3 t

                      e: = A Tensor with the same type and shape as t.

                      -> Tensor Build t

                      output: = A Tensor with the same type and shape as t and e.

                      selfAdjointEig Source #

                      Arguments

                      :: OneOf '[Double, Float] t 
                      => Tensor v'1 t

                      input: Shape is `[..., M, M]`.

                      -> Tensor Build t

                      output: Shape is `[..., M+1, M]`.

                      Computes the Eigen Decomposition of a batch of square self-adjoint matrices.

                      The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix SelfAdjointEig.

                      The result is a [..., M+1, M] matrix with [..., 0,:] containing the - eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.

                      selfAdjointEig'

                      Arguments

                      :: OneOf `[Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      input: Shape is `[..., M, M]`.

                      -> Tensor Build t

                      output: Shape is `[..., M+1, M]`.

                      selfAdjointEigV2

                      Arguments

                      :: OneOf `[Double, Float]` t 
                      => Tensor v'1 t

                      input: Tensor input of shape `[N, N]`.

                      -> (Tensor Build t, Tensor Build t)

                      (e, v)

                      • e: Eigenvalues. Shape is `[N]`.
                      • v: Eigenvectors. Shape is `[N, N]`.

                      Computes the eigen decomposition of one or more square self-adjoint matrices.

                      Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in - input such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.

                      ```prettyprint + eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.

                      selfAdjointEig' Source #

                      Arguments

                      :: OneOf '[Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      input: Shape is `[..., M, M]`.

                      -> Tensor Build t

                      output: Shape is `[..., M+1, M]`.

                      selfAdjointEigV2 Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                      => Tensor v'1 t

                      input: Tensor input of shape `[N, N]`.

                      -> (Tensor Build t, Tensor Build t)

                      (e, v)

                      • e: Eigenvalues. Shape is `[N]`.
                      • v: Eigenvectors. Shape is `[N, N]`.

                      Computes the eigen decomposition of one or more square self-adjoint matrices.

                      Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in + input such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.

                      ```python # a is a tensor. # e is a tensor of eigenvalues. # v is a tensor of eigenvectors. e, v = self_adjoint_eig(a) e = self_adjoint_eig(a, compute_v=False) - ```

                      selfAdjointEigV2'

                      Arguments

                      :: OneOf `[Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      input: Tensor input of shape `[N, N]`.

                      -> (Tensor Build t, Tensor Build t)

                      (e, v)

                      • e: Eigenvalues. Shape is `[N]`.
                      • v: Eigenvectors. Shape is `[N, N]`.

                      serializeManySparse

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Int64

                      sparse_indices: 2-D. The indices of the minibatch SparseTensor.

                      -> Tensor v'2 t

                      sparse_values: 1-D. The values of the minibatch SparseTensor.

                      -> Tensor v'3 Int64

                      sparse_shape: 1-D. The shape of the minibatch SparseTensor.

                      -> Tensor Build ByteString

                      serialized_sparse

                      Serialize an N-minibatch SparseTensor into an `[N, 3]` string Tensor.

                      The SparseTensor must have rank R greater than 1, and the first dimension + ```

                      selfAdjointEigV2' Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      input: Tensor input of shape `[N, N]`.

                      -> (Tensor Build t, Tensor Build t)

                      (e, v)

                      • e: Eigenvalues. Shape is `[N]`.
                      • v: Eigenvectors. Shape is `[N, N]`.

                      serializeManySparse Source #

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Int64

                      sparse_indices: 2-D. The indices of the minibatch SparseTensor.

                      -> Tensor v'2 t

                      sparse_values: 1-D. The values of the minibatch SparseTensor.

                      -> Tensor v'3 Int64

                      sparse_shape: 1-D. The shape of the minibatch SparseTensor.

                      -> Tensor Build ByteString

                      serialized_sparse

                      Serialize an N-minibatch SparseTensor into an `[N, 3]` string Tensor.

                      The SparseTensor must have rank R greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the SparseTensor must be sorted in increasing order of this first dimension. The serialized SparseTensor objects going into each row of serialized_sparse will have - rank `R-1`.

                      The minibatch size N is extracted from `sparse_shape[0]`.

                      serializeManySparse'

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      sparse_indices: 2-D. The indices of the minibatch SparseTensor.

                      -> Tensor v'2 t

                      sparse_values: 1-D. The values of the minibatch SparseTensor.

                      -> Tensor v'3 Int64

                      sparse_shape: 1-D. The shape of the minibatch SparseTensor.

                      -> Tensor Build ByteString

                      serialized_sparse

                      serializeSparse

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Int64

                      sparse_indices: 2-D. The indices of the SparseTensor.

                      -> Tensor v'2 t

                      sparse_values: 1-D. The values of the SparseTensor.

                      -> Tensor v'3 Int64

                      sparse_shape: 1-D. The shape of the SparseTensor.

                      -> Tensor Build ByteString

                      serialized_sparse

                      Serialize a SparseTensor into a string 3-vector (1-D Tensor) object.

                      serializeSparse'

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      sparse_indices: 2-D. The indices of the SparseTensor.

                      -> Tensor v'2 t

                      sparse_values: 1-D. The values of the SparseTensor.

                      -> Tensor v'3 Int64

                      sparse_shape: 1-D. The shape of the SparseTensor.

                      -> Tensor Build ByteString

                      serialized_sparse

                      setSize

                      Arguments

                      :: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
                      => Tensor v'1 Int64

                      set_indices: 2D Tensor, indices of a SparseTensor.

                      -> Tensor v'2 t

                      set_values: 1D Tensor, values of a SparseTensor.

                      -> Tensor v'3 Int64

                      set_shape: 1D Tensor, shape of a SparseTensor.

                      -> Tensor Build Int32

                      size: For set ranked n, this is a Tensor with rank `n-1`, and the same 1st + rank `R-1`.

                      The minibatch size N is extracted from `sparse_shape[0]`.

                      serializeManySparse' Source #

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      sparse_indices: 2-D. The indices of the minibatch SparseTensor.

                      -> Tensor v'2 t

                      sparse_values: 1-D. The values of the minibatch SparseTensor.

                      -> Tensor v'3 Int64

                      sparse_shape: 1-D. The shape of the minibatch SparseTensor.

                      -> Tensor Build ByteString

                      serialized_sparse

                      serializeSparse Source #

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Int64

                      sparse_indices: 2-D. The indices of the SparseTensor.

                      -> Tensor v'2 t

                      sparse_values: 1-D. The values of the SparseTensor.

                      -> Tensor v'3 Int64

                      sparse_shape: 1-D. The shape of the SparseTensor.

                      -> Tensor Build ByteString

                      serialized_sparse

                      Serialize a SparseTensor into a string 3-vector (1-D Tensor) object.

                      serializeSparse' Source #

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      sparse_indices: 2-D. The indices of the SparseTensor.

                      -> Tensor v'2 t

                      sparse_values: 1-D. The values of the SparseTensor.

                      -> Tensor v'3 Int64

                      sparse_shape: 1-D. The shape of the SparseTensor.

                      -> Tensor Build ByteString

                      serialized_sparse

                      setSize Source #

                      Arguments

                      :: OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t 
                      => Tensor v'1 Int64

                      set_indices: 2D Tensor, indices of a SparseTensor.

                      -> Tensor v'2 t

                      set_values: 1D Tensor, values of a SparseTensor.

                      -> Tensor v'3 Int64

                      set_shape: 1D Tensor, shape of a SparseTensor.

                      -> Tensor Build Int32

                      size: For set ranked n, this is a Tensor with rank `n-1`, and the same 1st `n-1` dimensions as set. Each value is the number of unique elements in the corresponding `[0...n-1]` dimension of set.

                      Number of unique elements along last dimension of input set.

                      Input set is a SparseTensor represented by set_indices, set_values, and set_shape. The last dimension contains values in a set, duplicates are - allowed but ignored.

                      If validate_indices is True, this op validates the order and range of set - indices.

                      setSize'

                      Arguments

                      :: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      set_indices: 2D Tensor, indices of a SparseTensor.

                      -> Tensor v'2 t

                      set_values: 1D Tensor, values of a SparseTensor.

                      -> Tensor v'3 Int64

                      set_shape: 1D Tensor, shape of a SparseTensor.

                      -> Tensor Build Int32

                      size: For set ranked n, this is a Tensor with rank `n-1`, and the same 1st + allowed but ignored.

                      If validate_indices is True, this op validates the order and range of set + indices.

                      setSize' Source #

                      Arguments

                      :: OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      set_indices: 2D Tensor, indices of a SparseTensor.

                      -> Tensor v'2 t

                      set_values: 1D Tensor, values of a SparseTensor.

                      -> Tensor v'3 Int64

                      set_shape: 1D Tensor, shape of a SparseTensor.

                      -> Tensor Build Int32

                      size: For set ranked n, this is a Tensor with rank `n-1`, and the same 1st `n-1` dimensions as set. Each value is the number of unique elements in - the corresponding `[0...n-1]` dimension of set.

                      shape

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` out_type) 
                      => Tensor v'1 t

                      input

                      -> Tensor Build out_type

                      output

                      Returns the shape of a tensor.

                      This operation returns a 1-D integer tensor representing the shape of input.

                      For example:

                      ```prettyprint + the corresponding `[0...n-1]` dimension of set.

                      shape Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] out_type) 
                      => Tensor v'1 t

                      input

                      -> Tensor Build out_type

                      output

                      Returns the shape of a tensor.

                      This operation returns a 1-D integer tensor representing the shape of input.

                      For example:

                      ``` # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] shape(t) ==> [2, 2, 3] - ```

                      shape'

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` out_type) 
                      => OpParams 
                      -> Tensor v'1 t

                      input

                      -> Tensor Build out_type

                      output

                      shapeN

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` out_type) 
                      => [Tensor v'1 t]

                      input

                      -> [Tensor Build out_type]

                      output

                      Returns shape of tensors.

                      This operation returns N 1-D integer tensors representing shape of `input[i]s`.

                      shapeN'

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` out_type) 
                      => OpParams 
                      -> [Tensor v'1 t]

                      input

                      -> [Tensor Build out_type]

                      output

                      shardedFilename

                      Arguments

                      :: Tensor v'1 ByteString

                      basename

                      -> Tensor v'2 Int32

                      shard

                      -> Tensor v'3 Int32

                      num_shards

                      -> Tensor Build ByteString

                      filename

                      Generate a sharded filename. The filename is printf formatted as

                      %s-%05d-of-%05d, basename, shard, num_shards.

                      shardedFilename'

                      Arguments

                      :: OpParams 
                      -> Tensor v'1 ByteString

                      basename

                      -> Tensor v'2 Int32

                      shard

                      -> Tensor v'3 Int32

                      num_shards

                      -> Tensor Build ByteString

                      filename

                      shardedFilespec

                      Arguments

                      :: Tensor v'1 ByteString

                      basename

                      -> Tensor v'2 Int32

                      num_shards

                      -> Tensor Build ByteString

                      filename

                      Generate a glob pattern matching all sharded file names.

                      shardedFilespec'

                      Arguments

                      :: OpParams 
                      -> Tensor v'1 ByteString

                      basename

                      -> Tensor v'2 Int32

                      num_shards

                      -> Tensor Build ByteString

                      filename

                      sigmoid

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Computes sigmoid of x element-wise.

                      Specifically, `y = 1 / (1 + exp(-x))`.

                      sigmoidGrad

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                      => Tensor v'1 t

                      x

                      -> Tensor v'2 t

                      y

                      -> Tensor Build t

                      z

                      Computes the gradient of the sigmoid of x wrt its input.

                      Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and - dy is the corresponding input gradient.

                      sigmoidGrad'

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      x

                      -> Tensor v'2 t

                      y

                      -> Tensor Build t

                      z

                      sign

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Returns an element-wise indication of the sign of a number.

                      `y = sign(x) = -1` if `x 0 if `x == 0`; 1 if `x 0`.

                      For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

                      sin

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Computes sin of x element-wise.

                      sin'

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      size

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` out_type) 
                      => Tensor v'1 t

                      input

                      -> Tensor Build out_type

                      output

                      Returns the size of a tensor.

                      This operation returns an integer representing the number of elements in - input.

                      For example:

                      ```prettyprint + ```

                      shape' Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] out_type) 
                      => OpParams 
                      -> Tensor v'1 t

                      input

                      -> Tensor Build out_type

                      output

                      shapeN Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] out_type) 
                      => [Tensor v'1 t]

                      input

                      -> [Tensor Build out_type]

                      output

                      Returns shape of tensors.

                      This operation returns N 1-D integer tensors representing shape of `input[i]s`.

                      shapeN' Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] out_type) 
                      => OpParams 
                      -> [Tensor v'1 t]

                      input

                      -> [Tensor Build out_type]

                      output

                      shardedFilename Source #

                      Arguments

                      :: Tensor v'1 ByteString

                      basename

                      -> Tensor v'2 Int32

                      shard

                      -> Tensor v'3 Int32

                      num_shards

                      -> Tensor Build ByteString

                      filename

                      Generate a sharded filename. The filename is printf formatted as

                      %s-%05d-of-%05d, basename, shard, num_shards.

                      shardedFilename' Source #

                      Arguments

                      :: OpParams 
                      -> Tensor v'1 ByteString

                      basename

                      -> Tensor v'2 Int32

                      shard

                      -> Tensor v'3 Int32

                      num_shards

                      -> Tensor Build ByteString

                      filename

                      shardedFilespec Source #

                      Arguments

                      :: Tensor v'1 ByteString

                      basename

                      -> Tensor v'2 Int32

                      num_shards

                      -> Tensor Build ByteString

                      filename

                      Generate a glob pattern matching all sharded file names.

                      shardedFilespec' Source #

                      Arguments

                      :: OpParams 
                      -> Tensor v'1 ByteString

                      basename

                      -> Tensor v'2 Int32

                      num_shards

                      -> Tensor Build ByteString

                      filename

                      shuffleDataset Source #

                      Arguments

                      :: MonadBuild m' 
                      => [DataType]

                      output_types

                      -> Tensor v'1 ResourceHandle

                      input_dataset

                      -> Tensor v'2 Int64

                      buffer_size: The number of output elements to buffer in an iterator over + this dataset. Compare with the min_after_dequeue attr when creating a + RandomShuffleQueue.

                      -> Tensor v'3 Int64

                      seed: A scalar seed for the random number generator. If either seed or + seed2 is set to be non-zero, the random number generator is seeded + by the given seed. Otherwise, a random seed is used.

                      -> Tensor v'4 Int64

                      seed2: A second scalar seed to avoid seed collision.

                      -> m' (Tensor Value ResourceHandle)

                      handle

                      Creates a dataset that shuffles elements from input_dataset pseudorandomly.

                      shuffleDataset' Source #

                      Arguments

                      :: MonadBuild m' 
                      => OpParams 
                      -> [DataType]

                      output_types

                      -> Tensor v'1 ResourceHandle

                      input_dataset

                      -> Tensor v'2 Int64

                      buffer_size: The number of output elements to buffer in an iterator over + this dataset. Compare with the min_after_dequeue attr when creating a + RandomShuffleQueue.

                      -> Tensor v'3 Int64

                      seed: A scalar seed for the random number generator. If either seed or + seed2 is set to be non-zero, the random number generator is seeded + by the given seed. Otherwise, a random seed is used.

                      -> Tensor v'4 Int64

                      seed2: A second scalar seed to avoid seed collision.

                      -> m' (Tensor Value ResourceHandle)

                      handle

                      sigmoid Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Computes sigmoid of x element-wise.

                      Specifically, `y = 1 / (1 + exp(-x))`.

                      sigmoidGrad Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                      => Tensor v'1 t

                      x

                      -> Tensor v'2 t

                      y

                      -> Tensor Build t

                      z

                      Computes the gradient of the sigmoid of x wrt its input.

                      Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and + dy is the corresponding input gradient.

                      sign Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Returns an element-wise indication of the sign of a number.

                      `y = sign(x) = -1` if `x 0 if `x == 0`; 1 if `x 0`.

                      For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

                      sin Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Computes sin of x element-wise.

                      sinh Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                      => Tensor v'1 t

                      x

                      -> Tensor Build t

                      y

                      Computes hyperbolic sine of x element-wise.

                      size Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] out_type) 
                      => Tensor v'1 t

                      input

                      -> Tensor Build out_type

                      output

                      Returns the size of a tensor.

                      This operation returns an integer representing the number of elements in + input.

                      For example:

                      ``` # t is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] size(t) ==> 12 - ```

                      size'

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` out_type) 
                      => OpParams 
                      -> Tensor v'1 t

                      input

                      -> Tensor Build out_type

                      output

                      skipgram

                      Arguments

                      :: MonadBuild m' 
                      => Int64

                      batch_size: The size of produced batch.

                      -> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)

                      (vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels)

                      • vocab_word: A vector of words in the corpus.
                      • vocab_freq: Frequencies of words. Sorted in the non-ascending order.
                      • words_per_epoch: Number of words per epoch in the data file.
                      • current_epoch: The current epoch number.
                      • total_words_processed: The total number of words processed so far.
                      • examples: A vector of word ids.
                      • labels: A vector of word ids.

                      Parses a text file and creates a batch of examples.

                      skipgram'

                      Arguments

                      :: MonadBuild m' 
                      => OpParams 
                      -> Int64

                      batch_size: The size of produced batch.

                      -> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)

                      (vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels)

                      • vocab_word: A vector of words in the corpus.
                      • vocab_freq: Frequencies of words. Sorted in the non-ascending order.
                      • words_per_epoch: Number of words per epoch in the data file.
                      • current_epoch: The current epoch number.
                      • total_words_processed: The total number of words processed so far.
                      • examples: A vector of word ids.
                      • labels: A vector of word ids.

                      slice

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` index) 
                      => Tensor v'1 t

                      input

                      -> Tensor v'2 index

                      begin: begin[i] specifies the offset into the ith dimension of - input to slice from.

                      -> Tensor v'3 index

                      size: size[i] specifies the number of elements of the ith dimension + ```

                      size' Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] out_type) 
                      => OpParams 
                      -> Tensor v'1 t

                      input

                      -> Tensor Build out_type

                      output

                      skipDataset Source #

                      Arguments

                      :: MonadBuild m' 
                      => [DataType]

                      output_types

                      -> Tensor v'1 ResourceHandle

                      input_dataset

                      -> Tensor v'2 Int64

                      count: A scalar representing the number of elements from the input_dataset + that should be skipped. If count is -1, skips everything.

                      -> m' (Tensor Value ResourceHandle)

                      handle

                      Creates a dataset that skips count elements from the input_dataset.

                      skipDataset' Source #

                      Arguments

                      :: MonadBuild m' 
                      => OpParams 
                      -> [DataType]

                      output_types

                      -> Tensor v'1 ResourceHandle

                      input_dataset

                      -> Tensor v'2 Int64

                      count: A scalar representing the number of elements from the input_dataset + that should be skipped. If count is -1, skips everything.

                      -> m' (Tensor Value ResourceHandle)

                      handle

                      skipgram Source #

                      Arguments

                      :: MonadBuild m' 
                      => Int64

                      batch_size: The size of produced batch.

                      -> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)

                      (vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels)

                      • vocab_word: A vector of words in the corpus.
                      • vocab_freq: Frequencies of words. Sorted in the non-ascending order.
                      • words_per_epoch: Number of words per epoch in the data file.
                      • current_epoch: The current epoch number.
                      • total_words_processed: The total number of words processed so far.
                      • examples: A vector of word ids.
                      • labels: A vector of word ids.

                      Parses a text file and creates a batch of examples.

                      skipgram' Source #

                      Arguments

                      :: MonadBuild m' 
                      => OpParams 
                      -> Int64

                      batch_size: The size of produced batch.

                      -> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)

                      (vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels)

                      • vocab_word: A vector of words in the corpus.
                      • vocab_freq: Frequencies of words. Sorted in the non-ascending order.
                      • words_per_epoch: Number of words per epoch in the data file.
                      • current_epoch: The current epoch number.
                      • total_words_processed: The total number of words processed so far.
                      • examples: A vector of word ids.
                      • labels: A vector of word ids.

                      slice Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] index) 
                      => Tensor v'1 t

                      input

                      -> Tensor v'2 index

                      begin: begin[i] specifies the offset into the ith dimension of + input to slice from.

                      -> Tensor v'3 index

                      size: size[i] specifies the number of elements of the ith dimension of input to slice. If size[i] is -1, all remaining elements in dimension i are included in the slice (i.e. this is equivalent to setting - size[i] = input.dim_size(i) - begin[i]).

                      -> Tensor Build t

                      output

                      Return a slice from input.

                      The output tensor is a tensor with dimensions described by size + size[i] = input.dim_size(i) - begin[i]).

                      -> Tensor Build t

                      output

                      Return a slice from input.

                      The output tensor is a tensor with dimensions described by size whose values are extracted from input starting at the offsets in begin.

                      • Requirements*: - 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)

                      slice'

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` index) 
                      => OpParams 
                      -> Tensor v'1 t

                      input

                      -> Tensor v'2 index

                      begin: begin[i] specifies the offset into the ith dimension of - input to slice from.

                      -> Tensor v'3 index

                      size: size[i] specifies the number of elements of the ith dimension + 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)

                      slice' Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] index) 
                      => OpParams 
                      -> Tensor v'1 t

                      input

                      -> Tensor v'2 index

                      begin: begin[i] specifies the offset into the ith dimension of + input to slice from.

                      -> Tensor v'3 index

                      size: size[i] specifies the number of elements of the ith dimension of input to slice. If size[i] is -1, all remaining elements in dimension i are included in the slice (i.e. this is equivalent to setting - size[i] = input.dim_size(i) - begin[i]).

                      -> Tensor Build t

                      output

                      softmax

                      Arguments

                      :: OneOf `[Word16, Double, Float]` t 
                      => Tensor v'1 t

                      logits: 2-D with shape `[batch_size, num_classes]`.

                      -> Tensor Build t

                      softmax: Same shape as logits.

                      Computes softmax activations.

                      For each batch i and class j we have

                      softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))

                      softmax'

                      Arguments

                      :: OneOf `[Word16, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      logits: 2-D with shape `[batch_size, num_classes]`.

                      -> Tensor Build t

                      softmax: Same shape as logits.

                      softmaxCrossEntropyWithLogits

                      Arguments

                      :: OneOf `[Word16, Double, Float]` t 
                      => Tensor v'1 t

                      features: batch_size x num_classes matrix

                      -> Tensor v'2 t

                      labels: batch_size x num_classes matrix + size[i] = input.dim_size(i) - begin[i]).

                      -> Tensor Build t

                      output

                      softmax Source #

                      Arguments

                      :: OneOf '[Word16, Double, Float] t 
                      => Tensor v'1 t

                      logits: 2-D with shape `[batch_size, num_classes]`.

                      -> Tensor Build t

                      softmax: Same shape as logits.

                      Computes softmax activations.

                      For each batch i and class j we have

                      softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))

                      softmax' Source #

                      Arguments

                      :: OneOf '[Word16, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      logits: 2-D with shape `[batch_size, num_classes]`.

                      -> Tensor Build t

                      softmax: Same shape as logits.

                      softmaxCrossEntropyWithLogits Source #

                      Arguments

                      :: OneOf '[Word16, Double, Float] t 
                      => Tensor v'1 t

                      features: batch_size x num_classes matrix

                      -> Tensor v'2 t

                      labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid - probability distribution.

                      -> (Tensor Build t, Tensor Build t)

                      (loss, backprop)

                      • loss: Per example loss (batch_size vector).
                      • backprop: backpropagated gradients (batch_size x num_classes matrix).

                      Computes softmax cross entropy cost and gradients to backpropagate.

                      Inputs are the logits, not probabilities.

                      softmaxCrossEntropyWithLogits'

                      Arguments

                      :: OneOf `[Word16, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      features: batch_size x num_classes matrix

                      -> Tensor v'2 t

                      labels: batch_size x num_classes matrix + probability distribution.

                      -> (Tensor Build t, Tensor Build t)

                      (loss, backprop)

                      • loss: Per example loss (batch_size vector).
                      • backprop: backpropagated gradients (batch_size x num_classes matrix).

                      Computes softmax cross entropy cost and gradients to backpropagate.

                      Inputs are the logits, not probabilities.

                      softmaxCrossEntropyWithLogits' Source #

                      Arguments

                      :: OneOf '[Word16, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      features: batch_size x num_classes matrix

                      -> Tensor v'2 t

                      labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid - probability distribution.

                      -> (Tensor Build t, Tensor Build t)

                      (loss, backprop)

                      • loss: Per example loss (batch_size vector).
                      • backprop: backpropagated gradients (batch_size x num_classes matrix).

                      softplus

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => Tensor v'1 t

                      features

                      -> Tensor Build t

                      activations

                      Computes softplus: `log(exp(features) + 1)`.

                      softplus'

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      features

                      -> Tensor Build t

                      activations

                      softplusGrad

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => Tensor v'1 t

                      gradients: The backpropagated gradients to the corresponding softplus operation.

                      -> Tensor v'2 t

                      features: The features passed as input to the corresponding softplus operation.

                      -> Tensor Build t

                      backprops: The gradients: `gradients / (1 + exp(-features))`.

                      Computes softplus gradients for a softplus operation.

                      softplusGrad'

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      gradients: The backpropagated gradients to the corresponding softplus operation.

                      -> Tensor v'2 t

                      features: The features passed as input to the corresponding softplus operation.

                      -> Tensor Build t

                      backprops: The gradients: `gradients / (1 + exp(-features))`.

                      softsign

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => Tensor v'1 t

                      features

                      -> Tensor Build t

                      activations

                      Computes softsign: `features / (abs(features) + 1)`.

                      softsign'

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      features

                      -> Tensor Build t

                      activations

                      softsignGrad

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => Tensor v'1 t

                      gradients: The backpropagated gradients to the corresponding softsign operation.

                      -> Tensor v'2 t

                      features: The features passed as input to the corresponding softsign operation.

                      -> Tensor Build t

                      backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`.

                      Computes softsign gradients for a softsign operation.

                      softsignGrad'

                      Arguments

                      :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 t

                      gradients: The backpropagated gradients to the corresponding softsign operation.

                      -> Tensor v'2 t

                      features: The features passed as input to the corresponding softsign operation.

                      -> Tensor Build t

                      backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`.

                      spaceToBatch

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
                      => Int64

                      block_size

                      -> Tensor v'1 t

                      input: 4-D with shape `[batch, height, width, depth]`.

                      -> Tensor v'2 tpaddings

                      paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + probability distribution.

                      -> (Tensor Build t, Tensor Build t)

                      (loss, backprop)

                      • loss: Per example loss (batch_size vector).
                      • backprop: backpropagated gradients (batch_size x num_classes matrix).

                      softplus Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 t

                      features

                      -> Tensor Build t

                      activations

                      Computes softplus: `log(exp(features) + 1)`.

                      softplus' Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      features

                      -> Tensor Build t

                      activations

                      softplusGrad Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 t

                      gradients: The backpropagated gradients to the corresponding softplus operation.

                      -> Tensor v'2 t

                      features: The features passed as input to the corresponding softplus operation.

                      -> Tensor Build t

                      backprops: The gradients: `gradients / (1 + exp(-features))`.

                      Computes softplus gradients for a softplus operation.

                      softplusGrad' Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      gradients: The backpropagated gradients to the corresponding softplus operation.

                      -> Tensor v'2 t

                      features: The features passed as input to the corresponding softplus operation.

                      -> Tensor Build t

                      backprops: The gradients: `gradients / (1 + exp(-features))`.

                      softsign Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 t

                      features

                      -> Tensor Build t

                      activations

                      Computes softsign: `features / (abs(features) + 1)`.

                      softsign' Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      features

                      -> Tensor Build t

                      activations

                      softsignGrad Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 t

                      gradients: The backpropagated gradients to the corresponding softsign operation.

                      -> Tensor v'2 t

                      features: The features passed as input to the corresponding softsign operation.

                      -> Tensor Build t

                      backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`.

                      Computes softsign gradients for a softsign operation.

                      softsignGrad' Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 t

                      gradients: The backpropagated gradients to the corresponding softsign operation.

                      -> Tensor v'2 t

                      features: The features passed as input to the corresponding softsign operation.

                      -> Tensor Build t

                      backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`.

                      spaceToBatch Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                      => Int64

                      block_size

                      -> Tensor v'1 t

                      input: 4-D with shape `[batch, height, width, depth]`.

                      -> Tensor v'2 tpaddings

                      paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies the padding of the input with zeros across the spatial dimensions as follows:

                      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

                      The effective spatial dimensions of the zero-padded input tensor will be:

                      height_pad = pad_top + height + pad_bottom width_pad = pad_left + width + pad_right

                      The attr block_size must be greater than one. It indicates the block size.

                      • Non-overlapping blocks of size `block_size x block size` in the height and width dimensions are rearranged into the batch dimension at each location.
                      • The batch of the output tensor is `batch * block_size * block_size`.
                      • Both height_pad and width_pad must be divisible by block_size.

                      The shape of the output will be:

                      [batch*block_size*block_size, height_padblock_size, width_padblock_size, - depth]

                      Some examples:

                      1. For the following input of shape `[1, 2, 2, 1]` and block_size of 2:

                      ```prettyprint + depth]

                      Some examples:

                      1. For the following input of shape `[1, 2, 2, 1]` and block_size of 2:

                      ``` x = [[[[1], [2]], [[3], [4]]]] - ```

                      The output tensor has shape `[4, 1, 1, 1]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[4, 1, 1, 1]` and value:

                      ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - ```

                      1. For the following input of shape `[1, 2, 2, 3]` and block_size of 2:

                      ```prettyprint + ```

                      1. For the following input of shape `[1, 2, 2, 3]` and block_size of 2:

                      ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] - ```

                      The output tensor has shape `[4, 1, 1, 3]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[4, 1, 1, 3]` and value:

                      ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] - ```

                      1. For the following input of shape `[1, 4, 4, 1]` and block_size of 2:

                      ```prettyprint + ```

                      1. For the following input of shape `[1, 4, 4, 1]` and block_size of 2:

                      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

                      The output tensor has shape `[4, 2, 2, 1]` and value:

                      ```prettyprint - x = [[[[1], [3]], [[5], [7]]], + ```

                      The output tensor has shape `[4, 2, 2, 1]` and value:

                      ``` + x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] - ```

                      1. For the following input of shape `[2, 2, 4, 1]` and block_size of 2:

                      ```prettyprint + ```

                      1. For the following input of shape `[2, 2, 4, 1]` and block_size of 2:

                      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

                      The output tensor has shape `[8, 1, 2, 1]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[8, 1, 2, 1]` and value:

                      ``` x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] ```

                      Among others, this operation is useful for reducing atrous convolution into - regular convolution.

                      -> Tensor Build t

                      output

                      SpaceToBatch for 4-D tensors of type T.

                      This is a legacy version of the more general SpaceToBatchND.

                      Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + regular convolution.

                      -> Tensor Build t

                      output

                      SpaceToBatch for 4-D tensors of type T.

                      This is a legacy version of the more general SpaceToBatchND.

                      Zero-pads and then rearranges (permutes) blocks of spatial data into batch. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the batch dimension. After the zero-padding, both height and width of the input must be divisible by the - block size.

                      spaceToBatch'

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
                      => OpParams 
                      -> Int64

                      block_size

                      -> Tensor v'1 t

                      input: 4-D with shape `[batch, height, width, depth]`.

                      -> Tensor v'2 tpaddings

                      paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + block size.

                      spaceToBatch' Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] tpaddings) 
                      => OpParams 
                      -> Int64

                      block_size

                      -> Tensor v'1 t

                      input: 4-D with shape `[batch, height, width, depth]`.

                      -> Tensor v'2 tpaddings

                      paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies the padding of the input with zeros across the spatial dimensions as follows:

                      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

                      The effective spatial dimensions of the zero-padded input tensor will be:

                      height_pad = pad_top + height + pad_bottom width_pad = pad_left + width + pad_right

                      The attr block_size must be greater than one. It indicates the block size.

                      • Non-overlapping blocks of size `block_size x block size` in the height and width dimensions are rearranged into the batch dimension at each location.
                      • The batch of the output tensor is `batch * block_size * block_size`.
                      • Both height_pad and width_pad must be divisible by block_size.

                      The shape of the output will be:

                      [batch*block_size*block_size, height_padblock_size, width_padblock_size, - depth]

                      Some examples:

                      1. For the following input of shape `[1, 2, 2, 1]` and block_size of 2:

                      ```prettyprint + depth]

                      Some examples:

                      1. For the following input of shape `[1, 2, 2, 1]` and block_size of 2:

                      ``` x = [[[[1], [2]], [[3], [4]]]] - ```

                      The output tensor has shape `[4, 1, 1, 1]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[4, 1, 1, 1]` and value:

                      ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - ```

                      1. For the following input of shape `[1, 2, 2, 3]` and block_size of 2:

                      ```prettyprint + ```

                      1. For the following input of shape `[1, 2, 2, 3]` and block_size of 2:

                      ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] - ```

                      The output tensor has shape `[4, 1, 1, 3]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[4, 1, 1, 3]` and value:

                      ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] - ```

                      1. For the following input of shape `[1, 4, 4, 1]` and block_size of 2:

                      ```prettyprint + ```

                      1. For the following input of shape `[1, 4, 4, 1]` and block_size of 2:

                      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

                      The output tensor has shape `[4, 2, 2, 1]` and value:

                      ```prettyprint - x = [[[[1], [3]], [[5], [7]]], + ```

                      The output tensor has shape `[4, 2, 2, 1]` and value:

                      ``` + x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] - ```

                      1. For the following input of shape `[2, 2, 4, 1]` and block_size of 2:

                      ```prettyprint + ```

                      1. For the following input of shape `[2, 2, 4, 1]` and block_size of 2:

                      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

                      The output tensor has shape `[8, 1, 2, 1]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[8, 1, 2, 1]` and value:

                      ``` x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] ```

                      Among others, this operation is useful for reducing atrous convolution into - regular convolution.

                      -> Tensor Build t

                      output

                      spaceToBatchND

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tpaddings) 
                      => Tensor v'1 t

                      input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - where spatial_shape has M dimensions.

                      -> Tensor v'2 tblock_shape

                      block_shape: 1-D with shape `[M]`, all values must be >= 1.

                      -> Tensor v'3 tpaddings

                      paddings: 2-D with shape `[M, 2]`, all values must be >= 0. + regular convolution.

                      -> Tensor Build t

                      output

                      spaceToBatchND Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) 
                      => Tensor v'1 t

                      input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + where spatial_shape has M dimensions.

                      -> Tensor v'2 tblock_shape

                      block_shape: 1-D with shape `[M]`, all values must be >= 1.

                      -> Tensor v'3 tpaddings

                      paddings: 2-D with shape `[M, 2]`, all values must be >= 0. `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension `i + 1`, which corresponds to spatial dimension i. It is required that `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.

                      This operation is equivalent to the following steps:

                      1. Zero-pad the start and end of dimensions `[1, ..., M]` of the @@ -2823,48 +3262,48 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core ..., padded_shape[M] / block_shape[M-1]] + remaining_shape

                        Some examples:

                        1. For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and - `paddings = [[0, 0], [0, 0]]`:

                        ```prettyprint + `paddings = [[0, 0], [0, 0]]`:

                      ``` x = [[[[1], [2]], [[3], [4]]]] - ```

                      The output tensor has shape `[4, 1, 1, 1]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[4, 1, 1, 1]` and value:

                      ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```

                      1. For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and - `paddings = [[0, 0], [0, 0]]`:

                      ```prettyprint + `paddings = [[0, 0], [0, 0]]`:

                      ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] - ```

                      The output tensor has shape `[4, 1, 1, 3]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[4, 1, 1, 3]` and value:

                      ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ```

                      1. For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and - `paddings = [[0, 0], [0, 0]]`:

                      ```prettyprint + `paddings = [[0, 0], [0, 0]]`:

                      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

                      The output tensor has shape `[4, 2, 2, 1]` and value:

                      ```prettyprint - x = [[[[1], [3]], [[5], [7]]], + ```

                      The output tensor has shape `[4, 2, 2, 1]` and value:

                      ``` + x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ```

                      1. For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and - paddings = `[[0, 0], [2, 0]]`:

                      ```prettyprint + paddings = `[[0, 0], [2, 0]]`:

                      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

                      The output tensor has shape `[8, 1, 3, 1]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[8, 1, 3, 1]` and value:

                      ``` x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] ```

                      Among others, this operation is useful for reducing atrous convolution into - regular convolution.

                      -> Tensor Build t

                      output

                      SpaceToBatch for N-D tensors of type T.

                      This operation divides "spatial" dimensions `[1, ..., M]` of the input into a + regular convolution.

                      -> Tensor Build t

                      output

                      SpaceToBatch for N-D tensors of type T.

                      This operation divides "spatial" dimensions `[1, ..., M]` of the input into a grid of blocks of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that in the output, the spatial dimensions `[1, ..., M]` correspond to the position within the grid, and the batch dimension combines both the position within a spatial block and the original batch position. Prior to division into blocks, the spatial dimensions of the input are optionally zero padded according to paddings. See below for a - precise description.

                      spaceToBatchND'

                      Arguments

                      :: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tpaddings) 
                      => OpParams 
                      -> Tensor v'1 t

                      input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - where spatial_shape has M dimensions.

                      -> Tensor v'2 tblock_shape

                      block_shape: 1-D with shape `[M]`, all values must be >= 1.

                      -> Tensor v'3 tpaddings

                      paddings: 2-D with shape `[M, 2]`, all values must be >= 0. + precise description.

                      spaceToBatchND' Source #

                      Arguments

                      :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) 
                      => OpParams 
                      -> Tensor v'1 t

                      input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + where spatial_shape has M dimensions.

                      -> Tensor v'2 tblock_shape

                      block_shape: 1-D with shape `[M]`, all values must be >= 1.

                      -> Tensor v'3 tpaddings

                      paddings: 2-D with shape `[M, 2]`, all values must be >= 0. `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension `i + 1`, which corresponds to spatial dimension i. It is required that `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.

                      This operation is equivalent to the following steps:

                      1. Zero-pad the start and end of dimensions `[1, ..., M]` of the @@ -2884,40 +3323,40 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core ..., padded_shape[M] / block_shape[M-1]] + remaining_shape

                        Some examples:

                        1. For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and - `paddings = [[0, 0], [0, 0]]`:

                        ```prettyprint + `paddings = [[0, 0], [0, 0]]`:

                      ``` x = [[[[1], [2]], [[3], [4]]]] - ```

                      The output tensor has shape `[4, 1, 1, 1]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[4, 1, 1, 1]` and value:

                      ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```

                      1. For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and - `paddings = [[0, 0], [0, 0]]`:

                      ```prettyprint + `paddings = [[0, 0], [0, 0]]`:

                      ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] - ```

                      The output tensor has shape `[4, 1, 1, 3]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[4, 1, 1, 3]` and value:

                      ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ```

                      1. For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and - `paddings = [[0, 0], [0, 0]]`:

                      ```prettyprint + `paddings = [[0, 0], [0, 0]]`:

                      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

                      The output tensor has shape `[4, 2, 2, 1]` and value:

                      ```prettyprint - x = [[[[1], [3]], [[5], [7]]], + ```

                      The output tensor has shape `[4, 2, 2, 1]` and value:

                      ``` + x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ```

                      1. For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and - paddings = `[[0, 0], [2, 0]]`:

                      ```prettyprint + paddings = `[[0, 0], [2, 0]]`:

                      ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] - ```

                      The output tensor has shape `[8, 1, 3, 1]` and value:

                      ```prettyprint + ```

                      The output tensor has shape `[8, 1, 3, 1]` and value:

                      ``` x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] ```

                      Among others, this operation is useful for reducing atrous convolution into - regular convolution.

                      -> Tensor Build t

                      output

                      spaceToDepth

                      Arguments

                      :: TensorType t 
                      => Int64

                      block_size: The size of the spatial block.

                      -> Tensor v'1 t

                      input

                      -> Tensor Build t

                      output

                      SpaceToDepth for tensors of type T.

                      Rearranges blocks of spatial data, into depth. More specifically, + regular convolution.

                      -> Tensor Build t

                      output

                      spaceToDepth Source #

                      Arguments

                      :: TensorType t 
                      => Int64

                      block_size: The size of the spatial block.

                      -> Tensor v'1 t

                      input

                      -> Tensor Build t

                      output

                      SpaceToDepth for tensors of type T.

                      Rearranges blocks of spatial data, into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. The attr block_size indicates the input block size and how the data is moved.

                      • Non-overlapping blocks of size `block_size x block size` are rearranged @@ -2927,45 +3366,46 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core `[batch, heightblock_size, widthblock_size, depth*block_size*block_size]`

                        This operation requires that the input tensor be of rank 4, and that block_size be >=1 and a divisor of both the input height and width.

                        This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training - purely convolutional models.

                        For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:

                        ```prettyprint + purely convolutional models.

                        For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:

                        ``` x = [[[[1], [2]], [[3], [4]]]] - ```

                        This operation will output a tensor of shape `[1, 1, 1, 4]`:

                        ```prettyprint + ```

                        This operation will output a tensor of shape `[1, 1, 1, 4]`:

                        ``` [[[[1, 2, 3, 4]]]] ```

                        Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, the corresponding output will have a single element (i.e. width and height are both 1) and will have a depth of 4 channels (1 * block_size * block_size). - The output element shape is `[1, 1, 4]`.

                        For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.

                        ```prettyprint + The output element shape is `[1, 1, 4]`.

                        For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.

                        ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ```

                        This operation, for block_size of 2, will return the following tensor of shape - `[1, 1, 1, 12]`

                        ```prettyprint + `[1, 1, 1, 12]`

                        ``` [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] - ```

                        Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:

                        ```prettyprint + ```

                        Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:

                        ``` x = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]], [[9], [10], [13], [14]], [[11], [12], [15], [16]]]] - ```

                        the operator will return the following tensor of shape `[1 2 2 4]`:

                        ```prettyprint + ```

                        the operator will return the following tensor of shape `[1 2 2 4]`:

                        ``` x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]] - ```

                      spaceToDepth'

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Int64

                      block_size: The size of the spatial block.

                      -> Tensor v'1 t

                      input

                      -> Tensor Build t

                      output

                      sparseAccumulatorApplyGradient

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
                      => Bool

                      has_known_shape: Boolean indicating whether gradient_shape is unknown, in which - case the input is ignored during validation.

                      -> Tensor Ref ByteString

                      handle: The handle to a accumulator.

                      -> Tensor v'2 Int64

                      local_step: The local_step value at which the sparse gradient was computed.

                      -> Tensor v'3 Int64

                      gradient_indices: Indices of the sparse gradient to be accumulated. Must be a - vector.

                      -> Tensor v'4 dtype

                      gradient_values: Values are the non-zero slices of the gradient, and must have + ```

                      spaceToDepth' Source #

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Int64

                      block_size: The size of the spatial block.

                      -> Tensor v'1 t

                      input

                      -> Tensor Build t

                      output

                      sparseAccumulatorApplyGradient Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) 
                      => Bool

                      has_known_shape: Boolean indicating whether gradient_shape is unknown, in which + case the input is ignored during validation.

                      -> Tensor Ref ByteString

                      handle: The handle to a accumulator.

                      -> Tensor v'2 Int64

                      local_step: The local_step value at which the sparse gradient was computed.

                      -> Tensor v'3 Int64

                      gradient_indices: Indices of the sparse gradient to be accumulated. Must be a + vector.

                      -> Tensor v'4 dtype

                      gradient_values: Values are the non-zero slices of the gradient, and must have the same first dimension as indices, i.e., the nnz represented by indices and - values must be consistent.

                      -> Tensor v'5 Int64

                      gradient_shape: Shape of the sparse gradient to be accumulated.

                      -> m' ControlNode 

                      Applies a sparse gradient to a given accumulator. Does not add if local_step is

                      lesser than the accumulator's global_step.

                      sparseAccumulatorApplyGradient'

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
                      => OpParams 
                      -> Bool

                      has_known_shape: Boolean indicating whether gradient_shape is unknown, in which - case the input is ignored during validation.

                      -> Tensor Ref ByteString

                      handle: The handle to a accumulator.

                      -> Tensor v'2 Int64

                      local_step: The local_step value at which the sparse gradient was computed.

                      -> Tensor v'3 Int64

                      gradient_indices: Indices of the sparse gradient to be accumulated. Must be a - vector.

                      -> Tensor v'4 dtype

                      gradient_values: Values are the non-zero slices of the gradient, and must have + values must be consistent.

                      -> Tensor v'5 Int64

                      gradient_shape: Shape of the sparse gradient to be accumulated.

                      -> m' ControlNode 

                      Applies a sparse gradient to a given accumulator.

                      Does not add if local_step is smaller than the accumulator's + global_step.

                      sparseAccumulatorApplyGradient' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) 
                      => OpParams 
                      -> Bool

                      has_known_shape: Boolean indicating whether gradient_shape is unknown, in which + case the input is ignored during validation.

                      -> Tensor Ref ByteString

                      handle: The handle to a accumulator.

                      -> Tensor v'2 Int64

                      local_step: The local_step value at which the sparse gradient was computed.

                      -> Tensor v'3 Int64

                      gradient_indices: Indices of the sparse gradient to be accumulated. Must be a + vector.

                      -> Tensor v'4 dtype

                      gradient_values: Values are the non-zero slices of the gradient, and must have the same first dimension as indices, i.e., the nnz represented by indices and - values must be consistent.

                      -> Tensor v'5 Int64

                      gradient_shape: Shape of the sparse gradient to be accumulated.

                      -> m' ControlNode 

                      sparseAccumulatorTakeGradient

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
                      => Tensor Ref ByteString

                      handle: The handle to a SparseConditionalAccumulator.

                      -> Tensor v'2 Int32

                      num_required: Number of gradients required before we return an aggregate.

                      -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

                      (indices, values, shape)

                      • indices: Indices of the average of the accumulated sparse gradients.
                      • values: Values of the average of the accumulated sparse gradients.
                      • shape: Shape of the average of the accumulated sparse gradients.

                      Extracts the average sparse gradient in the given SparseConditionalAccumulator,

                      provided that sufficient (i.e., more than num_required) gradients have been - accumulated. The op will blocks until sufficient gradients have been - accumulated. If the accumulator has already aggregated more than num_required - gradients, it will return its average of the accumulated gradients. - Also automatically increments the recorded global_step in the accumulator by 1, - and resets the aggregate to 0.

                      sparseAccumulatorTakeGradient'

                      Arguments

                      :: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
                      => OpParams 
                      -> Tensor Ref ByteString

                      handle: The handle to a SparseConditionalAccumulator.

                      -> Tensor v'2 Int32

                      num_required: Number of gradients required before we return an aggregate.

                      -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

                      (indices, values, shape)

                      • indices: Indices of the average of the accumulated sparse gradients.
                      • values: Values of the average of the accumulated sparse gradients.
                      • shape: Shape of the average of the accumulated sparse gradients.

                      sparseAdd

                      Arguments

                      :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) 
                      => Tensor v'1 Int64

                      a_indices: 2-D. The indices of the first SparseTensor, size `[nnz, ndims]` Matrix.

                      -> Tensor v'2 t

                      a_values: 1-D. The values of the first SparseTensor, size `[nnz]` Vector.

                      -> Tensor v'3 Int64

                      a_shape: 1-D. The shape of the first SparseTensor, size `[ndims]` Vector.

                      -> Tensor v'4 Int64

                      b_indices: 2-D. The indices of the second SparseTensor, size `[nnz, ndims]` Matrix.

                      -> Tensor v'5 t

                      b_values: 1-D. The values of the second SparseTensor, size `[nnz]` Vector.

                      -> Tensor v'6 Int64

                      b_shape: 1-D. The shape of the second SparseTensor, size `[ndims]` Vector.

                      -> Tensor v'7 treal

                      thresh: 0-D. The magnitude threshold that determines if an output value/index - pair takes space.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                      (sum_indices, sum_values, sum_shape)

                      • sum_indices
                      • sum_values
                      • sum_shape

                      Adds two SparseTensor objects to produce another SparseTensor.

                      The input SparseTensor objects' indices are assumed ordered in standard + values must be consistent.

                      -> Tensor v'5 Int64

                      gradient_shape: Shape of the sparse gradient to be accumulated.

                      -> m' ControlNode 

                      sparseAccumulatorTakeGradient Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) 
                      => Tensor Ref ByteString

                      handle: The handle to a SparseConditionalAccumulator.

                      -> Tensor v'2 Int32

                      num_required: Number of gradients required before we return an aggregate.

                      -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

                      (indices, values, shape)

                      • indices: Indices of the average of the accumulated sparse gradients.
                      • values: Values of the average of the accumulated sparse gradients.
                      • shape: Shape of the average of the accumulated sparse gradients.

                      Extracts the average sparse gradient in a SparseConditionalAccumulator.

                      The op will blocks until sufficient (i.e., more than num_required) + gradients have been accumulated. If the accumulator has already + aggregated more than num_required gradients, it will return its + average of the accumulated gradients. Also automatically increments + the recorded global_step in the accumulator by 1, and resets the + aggregate to 0.

                      sparseAccumulatorTakeGradient' Source #

                      Arguments

                      :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) 
                      => OpParams 
                      -> Tensor Ref ByteString

                      handle: The handle to a SparseConditionalAccumulator.

                      -> Tensor v'2 Int32

                      num_required: Number of gradients required before we return an aggregate.

                      -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

                      (indices, values, shape)

                      • indices: Indices of the average of the accumulated sparse gradients.
                      • values: Values of the average of the accumulated sparse gradients.
                      • shape: Shape of the average of the accumulated sparse gradients.

                      sparseAdd Source #

                      Arguments

                      :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) 
                      => Tensor v'1 Int64

                      a_indices: 2-D. The indices of the first SparseTensor, size `[nnz, ndims]` Matrix.

                      -> Tensor v'2 t

                      a_values: 1-D. The values of the first SparseTensor, size `[nnz]` Vector.

                      -> Tensor v'3 Int64

                      a_shape: 1-D. The shape of the first SparseTensor, size `[ndims]` Vector.

                      -> Tensor v'4 Int64

                      b_indices: 2-D. The indices of the second SparseTensor, size `[nnz, ndims]` Matrix.

                      -> Tensor v'5 t

                      b_values: 1-D. The values of the second SparseTensor, size `[nnz]` Vector.

                      -> Tensor v'6 Int64

                      b_shape: 1-D. The shape of the second SparseTensor, size `[ndims]` Vector.

                      -> Tensor v'7 treal

                      thresh: 0-D. The magnitude threshold that determines if an output value/index + pair takes space.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                      (sum_indices, sum_values, sum_shape)

                      • sum_indices
                      • sum_values
                      • sum_shape

                      Adds two SparseTensor objects to produce another SparseTensor.

                      The input SparseTensor objects' indices are assumed ordered in standard lexicographic order. If this is not the case, before this step run SparseReorder to restore index ordering.

                      By default, if two values sum to zero at some index, the output SparseTensor would still include that particular location in its index, storing a zero in the @@ -2973,21 +3413,21 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core indicating that if the sum has a magnitude strictly smaller than thresh, its corresponding value and index would then not be included. In particular, `thresh == 0` (default) means everything is kept and actual thresholding happens - only for a positive value.

                      In the following shapes, nnz is the count after taking thresh into account.

                      sparseAdd'

                      Arguments

                      :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) 
                      => OpParams 
                      -> Tensor v'1 Int64

                      a_indices: 2-D. The indices of the first SparseTensor, size `[nnz, ndims]` Matrix.

                      -> Tensor v'2 t

                      a_values: 1-D. The values of the first SparseTensor, size `[nnz]` Vector.

                      -> Tensor v'3 Int64

                      a_shape: 1-D. The shape of the first SparseTensor, size `[ndims]` Vector.

                      -> Tensor v'4 Int64

                      b_indices: 2-D. The indices of the second SparseTensor, size `[nnz, ndims]` Matrix.

                      -> Tensor v'5 t

                      b_values: 1-D. The values of the second SparseTensor, size `[nnz]` Vector.

                      -> Tensor v'6 Int64

                      b_shape: 1-D. The shape of the second SparseTensor, size `[ndims]` Vector.

                      -> Tensor v'7 treal

                      thresh: 0-D. The magnitude threshold that determines if an output value/index - pair takes space.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                      (sum_indices, sum_values, sum_shape)

                      • sum_indices
                      • sum_values
                      • sum_shape

                      sparseAddGrad

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => Tensor v'1 t

                      backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to - the non-empty values of the sum.

                      -> Tensor v'2 Int64

                      a_indices: 2-D. The indices of the SparseTensor A, size `[nnz(A), ndims]`.

                      -> Tensor v'3 Int64

                      b_indices: 2-D. The indices of the SparseTensor B, size `[nnz(B), ndims]`.

                      -> Tensor v'4 Int64

                      sum_indices: 2-D. The indices of the sum SparseTensor, size - `[nnz(sum), ndims]`.

                      -> (Tensor Build t, Tensor Build t)

                      (a_val_grad, b_val_grad)

                      sparseDenseCwiseMul' Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      sp_values: 1-D. N non-empty values corresponding to sp_indices.

                      -> Tensor v'3 Int64

                      sp_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 t

                      dense: R-D. The dense Tensor operand.

                      -> Tensor Build t

                      output: 1-D. The N values that are operated on.

                      sparseFillEmptyRows Source #

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Int64

                      indices: 2-D. the indices of the sparse tensor.

                      -> Tensor v'2 t

                      values: 1-D. the values of the sparse tensor.

                      -> Tensor v'3 Int64

                      dense_shape: 1-D. the shape of the sparse tensor.

                      -> Tensor v'4 t

                      default_value: 0-D. default value to insert into location `[row, 0, ..., 0]` + for rows missing from the input sparse tensor. + output indices: 2-D. the indices of the filled sparse tensor.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Bool, Tensor Build Int64)

                      (output_indices, output_values, empty_row_indicator, reverse_index_map)

                      • output_indices
                      • output_values: 1-D. the values of the filled sparse tensor.
                      • empty_row_indicator: 1-D. whether the dense row was missing in the + input sparse tensor.
                      • reverse_index_map: 1-D. a map from the input indices to the output indices.

                      Fills empty rows in the input 2-D SparseTensor with a default value.

                      The input SparseTensor is represented via the tuple of inputs + (indices, values, dense_shape). The output SparseTensor has the + same dense_shape but with indices output_indices and values + output_values.

                      This op inserts a single entry for every row that doesn't have any values. + The index is created as `[row, 0, ..., 0]` and the inserted value + is default_value.

                      For example, suppose sp_input has shape `[5, 6]` and non-empty values:

                      0, 1
                      a
                      0, 3
                      b
                      2, 0
                      c
                      3, 1
                      d

                      Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:

                      0, 1
                      a
                      0, 3
                      b
                      1, 0
                      default_value
                      2, 0
                      c
                      3, 1
                      d
                      4, 0
                      default_value

                      The output SparseTensor will be in row-major order and will have the + same shape as the input.

                      This op also returns an indicator vector shaped `[dense_shape[0]]` such that

                      empty_row_indicator[i] = True iff row i was an empty row.

                      And a reverse index map vector shaped `[indices.shape[0]]` that is used during + backpropagation,

                      reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]

                      sparseFillEmptyRows' Source #

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      indices: 2-D. the indices of the sparse tensor.

                      -> Tensor v'2 t

                      values: 1-D. the values of the sparse tensor.

                      -> Tensor v'3 Int64

                      dense_shape: 1-D. the shape of the sparse tensor.

                      -> Tensor v'4 t

                      default_value: 0-D. default value to insert into location `[row, 0, ..., 0]` + for rows missing from the input sparse tensor. + output indices: 2-D. the indices of the filled sparse tensor.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Bool, Tensor Build Int64)

                      (output_indices, output_values, empty_row_indicator, reverse_index_map)

                      • output_indices
                      • output_values: 1-D. the values of the filled sparse tensor.
                      • empty_row_indicator: 1-D. whether the dense row was missing in the + input sparse tensor.
                      • reverse_index_map: 1-D. a map from the input indices to the output indices.

                      sparseFillEmptyRowsGrad Source #

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Int64

                      reverse_index_map: 1-D. The reverse index map from SparseFillEmptyRows.

                      -> Tensor v'2 t

                      grad_values: 1-D. The gradients from backprop.

                      -> (Tensor Build t, Tensor Build t)

                      (d_values, d_default_value)

                      • d_values: 1-D. The backprop into values.
                      • d_default_value: 0-D. The backprop into default_value.

                      The gradient of SparseFillEmptyRows.

                      Takes vectors reverse_index_map, shaped `[N]`, and grad_values, + shaped `[N_full]`, where `N_full >= N` and copies data into either + d_values or d_default_value. Here d_values is shaped `[N]` and + d_default_value is a scalar.

                      d_values[j] = grad_values[reverse_index_map[j]] + d_default_value = sum_{k : 0 .. N_full - 1} ( + grad_values[k] * 1{k not in reverse_index_map})

                      sparseFillEmptyRowsGrad' Source #

                      Arguments

                      :: TensorType t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      reverse_index_map: 1-D. The reverse index map from SparseFillEmptyRows.

                      -> Tensor v'2 t

                      grad_values: 1-D. The gradients from backprop.

                      -> (Tensor Build t, Tensor Build t)

                      (d_values, d_default_value)

                      • d_values: 1-D. The backprop into values.
                      • d_default_value: 0-D. The backprop into default_value.

                      sparseMatMul Source #

                      Arguments

                      :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) 
                      => Tensor v'1 ta

                      a

                      -> Tensor v'2 tb

                      b

                      -> Tensor Build Float

                      product

                      Multiply matrix "a" by matrix "b".

                      The inputs must be two-dimensional matrices and the inner dimension of "a" must match the outer dimension of "b". This op is optimized for the case where at least one of "a" or "b" is sparse. The breakeven for using this versus a dense - matrix multiply on one platform was 30% zero values in the sparse matrix.

                      sparseMatMul'

                      Arguments

                      :: (OneOf `[Word16, Float]` ta, OneOf `[Word16, Float]` tb) 
                      => OpParams 
                      -> Tensor v'1 ta

                      a

                      -> Tensor v'2 tb

                      b

                      -> Tensor Build Float

                      product

                      sparseReduceSum

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> Tensor Build t

                      output: `R-K`-D. The reduced Tensor.

                      Computes the sum of elements across dimensions of a SparseTensor.

                      This Op takes a SparseTensor and is the sparse counterpart to - `tf.reduce_sum()`. In particular, this Op also returns a dense Tensor + matrix multiply on one platform was 30% zero values in the sparse matrix.

                      The gradient computation of this operation will only take advantage of sparsity + in the input gradient when that gradient comes from a Relu.

                      sparseMatMul' Source #

                      Arguments

                      :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) 
                      => OpParams 
                      -> Tensor v'1 ta

                      a

                      -> Tensor v'2 tb

                      b

                      -> Tensor Build Float

                      product

                      sparseReduceMax Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> Tensor Build t

                      output: `R-K`-D. The reduced Tensor.

                      Computes the max of elements across dimensions of a SparseTensor.

                      This Op takes a SparseTensor and is the sparse counterpart to + `tf.reduce_max()`. In particular, this Op also returns a dense Tensor instead of a sparse one.

                      Reduces sp_input along the dimensions given in reduction_axes. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_axes. If keep_dims is true, the reduced dimensions are retained with length 1.

                      If reduction_axes has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, - which are interpreted according to the indexing rules in Python.

                      sparseReduceSum'

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> Tensor Build t

                      output: `R-K`-D. The reduced Tensor.

                      sparseReduceSumSparse

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                      (output_indices, output_values, output_shape)

                      • output_indices
                      • output_values
                      • output_shape

                      Computes the sum of elements across dimensions of a SparseTensor.

                      This Op takes a SparseTensor and is the sparse counterpart to + which are interpreted according to the indexing rules in Python.

                      sparseReduceMax' Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> Tensor Build t

                      output: `R-K`-D. The reduced Tensor.

                      sparseReduceMaxSparse Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                      (output_indices, output_values, output_shape)

                      • output_indices
                      • output_values
                      • output_shape

                      Computes the max of elements across dimensions of a SparseTensor.

                      This Op takes a SparseTensor and is the sparse counterpart to + `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a + SparseTensor.

                      Reduces sp_input along the dimensions given in reduction_axes. Unless + keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + reduction_axes. If keep_dims is true, the reduced dimensions are retained + with length 1.

                      If reduction_axes has no entries, all dimensions are reduced, and a tensor + with a single element is returned. Additionally, the axes can be negative, + which are interpreted according to the indexing rules in Python.

                      sparseReduceMaxSparse' Source #

                      Arguments

                      :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                      (output_indices, output_values, output_shape)

                      • output_indices
                      • output_values
                      • output_shape

                      sparseReduceSum Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> Tensor Build t

                      output: `R-K`-D. The reduced Tensor.

                      Computes the sum of elements across dimensions of a SparseTensor.

                      This Op takes a SparseTensor and is the sparse counterpart to + `tf.reduce_sum()`. In particular, this Op also returns a dense Tensor + instead of a sparse one.

                      Reduces sp_input along the dimensions given in reduction_axes. Unless + keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + reduction_axes. If keep_dims is true, the reduced dimensions are retained + with length 1.

                      If reduction_axes has no entries, all dimensions are reduced, and a tensor + with a single element is returned. Additionally, the axes can be negative, + which are interpreted according to the indexing rules in Python.

                      sparseReduceSum' Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> Tensor Build t

                      output: `R-K`-D. The reduced Tensor.

                      sparseReduceSumSparse Source #

                      Arguments

                      :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                      => Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                      (output_indices, output_values, output_shape)

                      • output_indices
                      • output_values
                      • output_shape

                      Computes the sum of elements across dimensions of a SparseTensor.

                      This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a SparseTensor.

                      Reduces sp_input along the dimensions given in reduction_axes. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_axes. If keep_dims is true, the reduced dimensions are retained with length 1.

                      If reduction_axes has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, - which are interpreted according to the indexing rules in Python.

                      sparseReduceSumSparse'

                      Arguments

                      :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                      => OpParams 
                      -> Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> Tensor v'4 Int32

                      reduction_axes: 1-D. Length-K vector containing the reduction axes.

                      -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                      (output_indices, output_values, output_shape)

                      • output_indices
                      • output_values
                      • output_shape

                      sparseReorder

                      Arguments

                      :: TensorType t 
                      => Tensor v'1 Int64

                      input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

                      -> Tensor v'2 t

                      input_values: 1-D. N non-empty values corresponding to input_indices.

                      -> Tensor v'3 Int64

                      input_shape: 1-D. Shape of the input SparseTensor.

                      -> (Tensor Build Int64, Tensor Build t)

                      (output_indices, output_values)

                      • output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but + which are interpreted according to the indexing rules in Python.

                        sparseReduceSumSparse' Source #

                        Arguments

                        :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                        => OpParams 
                        -> Tensor v'1 Int64

                        input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                        -> Tensor v'2 t

                        input_values: 1-D. N non-empty values corresponding to input_indices.

                        -> Tensor v'3 Int64

                        input_shape: 1-D. Shape of the input SparseTensor.

                        -> Tensor v'4 Int32

                        reduction_axes: 1-D. Length-K vector containing the reduction axes.

                        -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                        (output_indices, output_values, output_shape)

                        • output_indices
                        • output_values
                        • output_shape

                        sparseReorder Source #

                        Arguments

                        :: TensorType t 
                        => Tensor v'1 Int64

                        input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                        -> Tensor v'2 t

                        input_values: 1-D. N non-empty values corresponding to input_indices.

                        -> Tensor v'3 Int64

                        input_shape: 1-D. Shape of the input SparseTensor.

                        -> (Tensor Build Int64, Tensor Build t)

                        (output_indices, output_values)

                        • output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but in canonical row-major ordering.
                        • output_values: 1-D. N non-empty values corresponding to output_indices.

                        Reorders a SparseTensor into the canonical, row-major ordering.

                        Note that by convention, all sparse ops preserve the canonical ordering along increasing dimension number. The only time ordering can be violated is during manual manipulation of the indices and values vectors to add entries.

                        Reordering does not affect the shape of the SparseTensor.

                        If the tensor has rank R and N non-empty values, input_indices has - shape `[N, R]`, input_values has length N, and input_shape has length R.

                        sparseReorder'

                        Arguments

                        :: TensorType t 
                        => OpParams 
                        -> Tensor v'1 Int64

                        input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

                        -> Tensor v'2 t

                        input_values: 1-D. N non-empty values corresponding to input_indices.

                        -> Tensor v'3 Int64

                        input_shape: 1-D. Shape of the input SparseTensor.

                        -> (Tensor Build Int64, Tensor Build t)

                        (output_indices, output_values)

                        • output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but - in canonical row-major ordering.
                        • output_values: 1-D. N non-empty values corresponding to output_indices.

                        sparseReshape

                        Arguments

                        :: Tensor v'1 Int64

                        input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a - SparseTensor.

                        -> Tensor v'2 Int64

                        input_shape: 1-D. R_in vector with the input SparseTensor's dense shape.

                        -> Tensor v'3 Int64

                        new_shape: 1-D. R_out vector with the requested new dense shape.

                        -> (Tensor Build Int64, Tensor Build Int64)

                        (output_indices, output_shape)

                        • output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty + shape `[N, R]`, input_values has length N, and input_shape has length R.

                          sparseReorder' Source #

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, possibly not in canonical ordering.

                          -> Tensor v'2 t

                          input_values: 1-D. N non-empty values corresponding to input_indices.

                          -> Tensor v'3 Int64

                          input_shape: 1-D. Shape of the input SparseTensor.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but + in canonical row-major ordering.
                          • output_values: 1-D. N non-empty values corresponding to output_indices.

                          sparseReshape Source #

                          Arguments

                          :: Tensor v'1 Int64

                          input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a + SparseTensor.

                          -> Tensor v'2 Int64

                          input_shape: 1-D. R_in vector with the input SparseTensor's dense shape.

                          -> Tensor v'3 Int64

                          new_shape: 1-D. R_out vector with the requested new dense shape.

                          -> (Tensor Build Int64, Tensor Build Int64)

                          (output_indices, output_shape)

                          • output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty values in the output SparseTensor.
                          • output_shape: 1-D. R_out vector with the full dense shape of the output SparseTensor. This is the same as new_shape but with any -1 dimensions filled in.

                          Reshapes a SparseTensor to represent values in a new dense shape.

                          This operation has the same semantics as reshape on the represented dense @@ -3090,58 +3602,69 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core originally implied by input_shape.

                          Reshaping does not affect the order of values in the SparseTensor.

                          If the input tensor has rank R_in and N non-empty values, and new_shape has length R_out, then input_indices has shape `[N, R_in]`, input_shape has length R_in, output_indices has shape `[N, R_out]`, and - output_shape has length R_out.

                          sparseReshape'

                          Arguments

                          :: OpParams 
                          -> Tensor v'1 Int64

                          input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a - SparseTensor.

                          -> Tensor v'2 Int64

                          input_shape: 1-D. R_in vector with the input SparseTensor's dense shape.

                          -> Tensor v'3 Int64

                          new_shape: 1-D. R_out vector with the requested new dense shape.

                          -> (Tensor Build Int64, Tensor Build Int64)

                          (output_indices, output_shape)

                          • output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty + output_shape has length R_out.

                            sparseReshape' Source #

                            Arguments

                            :: OpParams 
                            -> Tensor v'1 Int64

                            input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a + SparseTensor.

                            -> Tensor v'2 Int64

                            input_shape: 1-D. R_in vector with the input SparseTensor's dense shape.

                            -> Tensor v'3 Int64

                            new_shape: 1-D. R_out vector with the requested new dense shape.

                            -> (Tensor Build Int64, Tensor Build Int64)

                            (output_indices, output_shape)

                            • output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty values in the output SparseTensor.
                            • output_shape: 1-D. R_out vector with the full dense shape of the output SparseTensor. This is the same as new_shape but with any -1 dimensions - filled in.

                            sparseSegmentMean

                            Arguments

                            :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => Tensor v'1 t

                            data

                            -> Tensor v'2 tidx

                            indices: A 1-D tensor. Has same rank as segment_ids.

                            -> Tensor v'3 Int32

                            segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                            -> Tensor Build t

                            output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                            Computes the mean along sparse segments of a tensor.

                            Read the section on - Segmentation for an explanation - of segments.

                            Like SegmentMean, but segment_ids can have rank less than `data`'s first - dimension, selecting a subset of dimension 0, specified by indices.

                            sparseSegmentMean'

                            Arguments

                            :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => OpParams 
                            -> Tensor v'1 t

                            data

                            -> Tensor v'2 tidx

                            indices: A 1-D tensor. Has same rank as segment_ids.

                            -> Tensor v'3 Int32

                            segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                            -> Tensor Build t

                            output: Has same shape as data, except for dimension 0 which - has size k, the number of segments.

                            sparseSegmentMeanGrad

                            Arguments

                            :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => Tensor v'1 t

                            grad: gradient propagated to the SparseSegmentMean op.

                            -> Tensor v'2 tidx

                            indices: indices passed to the corresponding SparseSegmentMean op.

                            -> Tensor v'3 Int32

                            segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.

                            -> Tensor v'4 Int32

                            output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.

                            -> Tensor Build t

                            output

                            Computes gradients for SparseSegmentMean.

                            Returns tensor "output" with same shape as grad, except for dimension 0 whose - value is output_dim0.

                            sparseSegmentMeanGrad'

                            Arguments

                            :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => OpParams 
                            -> Tensor v'1 t

                            grad: gradient propagated to the SparseSegmentMean op.

                            -> Tensor v'2 tidx

                            indices: indices passed to the corresponding SparseSegmentMean op.

                            -> Tensor v'3 Int32

                            segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.

                            -> Tensor v'4 Int32

                            output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.

                            -> Tensor Build t

                            output

                            sparseSegmentSqrtN

                            Arguments

                            :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => Tensor v'1 t

                            data

                            -> Tensor v'2 tidx

                            indices: A 1-D tensor. Has same rank as segment_ids.

                            -> Tensor v'3 Int32

                            segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                            -> Tensor Build t

                            output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                            Computes the sum along sparse segments of a tensor divided by the sqrt of N.

                            N is the size of the segment being reduced.

                            Read the section on Segmentation for an explanation of segments.

                            sparseSegmentSqrtN'

                            Arguments

                            :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => OpParams 
                            -> Tensor v'1 t

                            data

                            -> Tensor v'2 tidx

                            indices: A 1-D tensor. Has same rank as segment_ids.

                            -> Tensor v'3 Int32

                            segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                            -> Tensor Build t

                            output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                            sparseSegmentSqrtNGrad

                            Arguments

                            :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => Tensor v'1 t

                            grad: gradient propagated to the SparseSegmentSqrtN op.

                            -> Tensor v'2 tidx

                            indices: indices passed to the corresponding SparseSegmentSqrtN op.

                            -> Tensor v'3 Int32

                            segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.

                            -> Tensor v'4 Int32

                            output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.

                            -> Tensor Build t

                            output

                            Computes gradients for SparseSegmentSqrtN.

                            Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.

                            sparseSegmentSqrtNGrad'

                            Arguments

                            :: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => OpParams 
                            -> Tensor v'1 t

                            grad: gradient propagated to the SparseSegmentSqrtN op.

                            -> Tensor v'2 tidx

                            indices: indices passed to the corresponding SparseSegmentSqrtN op.

                            -> Tensor v'3 Int32

                            segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.

                            -> Tensor v'4 Int32

                            output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.

                            -> Tensor Build t

                            output

                            sparseSegmentSum

                            Arguments

                            :: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                            => Tensor v'1 t

                            data

                            -> Tensor v'2 tidx

                            indices: A 1-D tensor. Has same rank as segment_ids.

                            -> Tensor v'3 Int32

                            segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                            -> Tensor Build t

                            output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                            Computes the sum along sparse segments of a tensor.

                            Read the section on Segmentation for an explanation of segments.

                            Like SegmentSum, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

                            For example:

                            ```prettyprint + filled in.

                          sparseSegmentMean Source #

                          Arguments

                          :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => Tensor v'1 t

                          data

                          -> Tensor v'2 tidx

                          indices: A 1-D tensor. Has same rank as segment_ids.

                          -> Tensor v'3 Int32

                          segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                          -> Tensor Build t

                          output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                          Computes the mean along sparse segments of a tensor.

                          Read @{$math_ops#segmentation$the section on segmentation} for an explanation of segments.

                          Like SegmentMean, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

                          sparseSegmentMean' Source #

                          Arguments

                          :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => OpParams 
                          -> Tensor v'1 t

                          data

                          -> Tensor v'2 tidx

                          indices: A 1-D tensor. Has same rank as segment_ids.

                          -> Tensor v'3 Int32

                          segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                          -> Tensor Build t

                          output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                          sparseSegmentMeanGrad Source #

                          Arguments

                          :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => Tensor v'1 t

                          grad: gradient propagated to the SparseSegmentMean op.

                          -> Tensor v'2 tidx

                          indices: indices passed to the corresponding SparseSegmentMean op.

                          -> Tensor v'3 Int32

                          segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.

                          -> Tensor v'4 Int32

                          output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.

                          -> Tensor Build t

                          output

                          Computes gradients for SparseSegmentMean.

                          Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.

                          sparseSegmentMeanGrad' Source #

                          Arguments

                          :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => OpParams 
                          -> Tensor v'1 t

                          grad: gradient propagated to the SparseSegmentMean op.

                          -> Tensor v'2 tidx

                          indices: indices passed to the corresponding SparseSegmentMean op.

                          -> Tensor v'3 Int32

                          segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.

                          -> Tensor v'4 Int32

                          output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.

                          -> Tensor Build t

                          output

                          sparseSegmentSqrtN Source #

                          Arguments

                          :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => Tensor v'1 t

                          data

                          -> Tensor v'2 tidx

                          indices: A 1-D tensor. Has same rank as segment_ids.

                          -> Tensor v'3 Int32

                          segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                          -> Tensor Build t

                          output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                          Computes the sum along sparse segments of a tensor divided by the sqrt of N.

                          N is the size of the segment being reduced.

                          Read @{$math_ops#segmentation$the section on segmentation} for an explanation of segments.

                          sparseSegmentSqrtN' Source #

                          Arguments

                          :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => OpParams 
                          -> Tensor v'1 t

                          data

                          -> Tensor v'2 tidx

                          indices: A 1-D tensor. Has same rank as segment_ids.

                          -> Tensor v'3 Int32

                          segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                          -> Tensor Build t

                          output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                          sparseSegmentSqrtNGrad Source #

                          Arguments

                          :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => Tensor v'1 t

                          grad: gradient propagated to the SparseSegmentSqrtN op.

                          -> Tensor v'2 tidx

                          indices: indices passed to the corresponding SparseSegmentSqrtN op.

                          -> Tensor v'3 Int32

                          segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.

                          -> Tensor v'4 Int32

                          output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.

                          -> Tensor Build t

                          output

                          Computes gradients for SparseSegmentSqrtN.

                          Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.

                          sparseSegmentSqrtNGrad' Source #

                          Arguments

                          :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => OpParams 
                          -> Tensor v'1 t

                          grad: gradient propagated to the SparseSegmentSqrtN op.

                          -> Tensor v'2 tidx

                          indices: indices passed to the corresponding SparseSegmentSqrtN op.

                          -> Tensor v'3 Int32

                          segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.

                          -> Tensor v'4 Int32

                          output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.

                          -> Tensor Build t

                          output

                          sparseSegmentSum Source #

                          Arguments

                          :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => Tensor v'1 t

                          data

                          -> Tensor v'2 tidx

                          indices: A 1-D tensor. Has same rank as segment_ids.

                          -> Tensor v'3 Int32

                          segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                          -> Tensor Build t

                          output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                          Computes the sum along sparse segments of a tensor.

                          Read @{$math_ops#segmentation$the section on segmentation} for an explanation of segments.

                          Like SegmentSum, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

                          For example:

                          ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

                          # Select two rows, one segment. tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) - ==> [[0 0 0 0]]

                          # Select two rows, two segment. + # => [[0 0 0 0]]

                          # Select two rows, two segment. tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) - ==> [[ 1 2 3 4] - [-1 -2 -3 -4]]

                          # Select all rows, two segments. + # => [[ 1 2 3 4] + # [-1 -2 -3 -4]]

                          # Select all rows, two segments. tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) - ==> [[0 0 0 0] - [5 6 7 8]]

                          # Which is equivalent to: + # => [[0 0 0 0] + # [5 6 7 8]]

                          # Which is equivalent to: tf.segment_sum(c, tf.constant([0, 0, 1])) - ```

                          sparseSegmentSum'

                          Arguments

                          :: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                          => OpParams 
                          -> Tensor v'1 t

                          data

                          -> Tensor v'2 tidx

                          indices: A 1-D tensor. Has same rank as segment_ids.

                          -> Tensor v'3 Int32

                          segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                          -> Tensor Build t

                          output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                          sparseSoftmax

                          Arguments

                          :: OneOf `[Double, Float]` t 
                          => Tensor v'1 Int64

                          sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering.

                          -> Tensor v'2 t

                          sp_values: 1-D. NNZ non-empty values corresponding to sp_indices.

                          -> Tensor v'3 Int64

                          sp_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor Build t

                          output: 1-D. The NNZ values for the result SparseTensor.

                          Applies softmax to a batched N-D SparseTensor.

                          The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` + ```

                          sparseSegmentSum' Source #

                          Arguments

                          :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => OpParams 
                          -> Tensor v'1 t

                          data

                          -> Tensor v'2 tidx

                          indices: A 1-D tensor. Has same rank as segment_ids.

                          -> Tensor v'3 Int32

                          segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

                          -> Tensor Build t

                          output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

                          sparseSlice Source #

                          Arguments

                          :: TensorType t 
                          => Tensor v'1 Int64

                          indices: 2-D tensor represents the indices of the sparse tensor.

                          -> Tensor v'2 t

                          values: 1-D tensor represents the values of the sparse tensor.

                          -> Tensor v'3 Int64

                          shape: 1-D. tensor represents the shape of the sparse tensor.

                          -> Tensor v'4 Int64

                          start: 1-D. tensor represents the start of the slice.

                          -> Tensor v'5 Int64

                          size: 1-D. tensor represents the size of the slice. + output indices: A list of 1-D tensors represents the indices of the output + sparse tensors.

                          -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                          (output_indices, output_values, output_shape)

                          • output_indices
                          • output_values: A list of 1-D tensors represents the values of the output sparse tensors.
                          • output_shape: A list of 1-D tensors represents the shape of the output sparse tensors.

                          Slice a SparseTensor based on the start and size.

                          For example, if the input is

                          input_tensor = shape = [2, 7] + [ a d e ] + [b c ]

                          Graphically the output tensors are:

                          sparse_slice([0, 0], [2, 4]) = shape = [2, 4] + [ a ] + [b c ]

                          sparse_slice([0, 4], [2, 3]) = shape = [2, 3] + [ d e ] + [ ]

                          sparseSlice' Source #

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          indices: 2-D tensor represents the indices of the sparse tensor.

                          -> Tensor v'2 t

                          values: 1-D tensor represents the values of the sparse tensor.

                          -> Tensor v'3 Int64

                          shape: 1-D. tensor represents the shape of the sparse tensor.

                          -> Tensor v'4 Int64

                          start: 1-D. tensor represents the start of the slice.

                          -> Tensor v'5 Int64

                          size: 1-D. tensor represents the size of the slice. + output indices: A list of 1-D tensors represents the indices of the output + sparse tensors.

                          -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                          (output_indices, output_values, output_shape)

                          • output_indices
                          • output_values: A list of 1-D tensors represents the values of the output sparse tensors.
                          • output_shape: A list of 1-D tensors represents the shape of the output sparse tensors.

                          sparseSoftmax Source #

                          Arguments

                          :: OneOf '[Double, Float] t 
                          => Tensor v'1 Int64

                          sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering.

                          -> Tensor v'2 t

                          sp_values: 1-D. NNZ non-empty values corresponding to sp_indices.

                          -> Tensor v'3 Int64

                          sp_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor Build t

                          output: 1-D. The NNZ values for the result SparseTensor.

                          Applies softmax to a batched N-D SparseTensor.

                          The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` (where `N >= 2`), and with indices sorted in the canonical lexicographic order.

                          This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost logical submatrix with shape `[B, C]`, but with the catch that *the implicitly zero elements do not participate*. Specifically, the algorithm is equivalent to the following:

                          1. Applies `tf.nn.softmax()` to a densified view of each innermost submatrix with shape `[B, C]`, along the size-C dimension;
                          2. Masks out the original implicitly-zero locations;
                          3. Renormalizes the remaining elements.

                          Hence, the SparseTensor result has exactly the same non-zero indices and shape.

                          sparseSoftmax'

                          Arguments

                          :: OneOf `[Double, Float]` t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering.

                          -> Tensor v'2 t

                          sp_values: 1-D. NNZ non-empty values corresponding to sp_indices.

                          -> Tensor v'3 Int64

                          sp_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor Build t

                          output: 1-D. The NNZ values for the result SparseTensor.

                          sparseSoftmaxCrossEntropyWithLogits

                          Arguments

                          :: (OneOf `[Word16, Double, Float]` t, OneOf `[Int32, Int64]` tlabels) 
                          => Tensor v'1 t

                          features: batch_size x num_classes matrix

                          -> Tensor v'2 tlabels

                          labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry.

                          -> (Tensor Build t, Tensor Build t)

                          (loss, backprop)

                          • loss: Per example loss (batch_size vector).
                          • backprop: backpropagated gradients (batch_size x num_classes matrix).

                          Computes softmax cross entropy cost and gradients to backpropagate.

                          Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept + shape.

                          sparseSoftmax' Source #

                          Arguments

                          :: OneOf '[Double, Float] t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering.

                          -> Tensor v'2 t

                          sp_values: 1-D. NNZ non-empty values corresponding to sp_indices.

                          -> Tensor v'3 Int64

                          sp_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor Build t

                          output: 1-D. The NNZ values for the result SparseTensor.

                          sparseSoftmaxCrossEntropyWithLogits Source #

                          Arguments

                          :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) 
                          => Tensor v'1 t

                          features: batch_size x num_classes matrix

                          -> Tensor v'2 tlabels

                          labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry.

                          -> (Tensor Build t, Tensor Build t)

                          (loss, backprop)

                          • loss: Per example loss (batch_size vector).
                          • backprop: backpropagated gradients (batch_size x num_classes matrix).

                          Computes softmax cross entropy cost and gradients to backpropagate.

                          Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept a matrix of label probabilities, but rather a single label per row of features. This label is considered to have probability 1.0 for the given row.

                          Inputs are the logits, not probabilities.

                          sparseSoftmaxCrossEntropyWithLogits'

                          Arguments

                          :: (OneOf `[Word16, Double, Float]` t, OneOf `[Int32, Int64]` tlabels) 
                          => OpParams 
                          -> Tensor v'1 t

                          features: batch_size x num_classes matrix

                          -> Tensor v'2 tlabels

                          labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry.

                          -> (Tensor Build t, Tensor Build t)

                          (loss, backprop)

                          • loss: Per example loss (batch_size vector).
                          • backprop: backpropagated gradients (batch_size x num_classes matrix).

                          sparseSparseMaximum

                          Arguments

                          :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                          => Tensor v'1 Int64

                          a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

                          -> Tensor v'2 t

                          a_values: 1-D. N non-empty values corresponding to a_indices.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor v'4 Int64

                          b_indices: counterpart to a_indices for the other operand.

                          -> Tensor v'5 t

                          b_values: counterpart to a_values for the other operand; must be of the same dtype.

                          -> Tensor v'6 Int64

                          b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. The indices of the output SparseTensor.
                          • output_values: 1-D. The values of the output SparseTensor.

                          Returns the element-wise max of two SparseTensors.

                          Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

                          sparseSparseMaximum'

                          Arguments

                          :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

                          -> Tensor v'2 t

                          a_values: 1-D. N non-empty values corresponding to a_indices.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor v'4 Int64

                          b_indices: counterpart to a_indices for the other operand.

                          -> Tensor v'5 t

                          b_values: counterpart to a_values for the other operand; must be of the same dtype.

                          -> Tensor v'6 Int64

                          b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. The indices of the output SparseTensor.
                          • output_values: 1-D. The values of the output SparseTensor.

                          sparseSparseMinimum

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                          => Tensor v'1 Int64

                          a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

                          -> Tensor v'2 t

                          a_values: 1-D. N non-empty values corresponding to a_indices.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor v'4 Int64

                          b_indices: counterpart to a_indices for the other operand.

                          -> Tensor v'5 t

                          b_values: counterpart to a_values for the other operand; must be of the same dtype.

                          -> Tensor v'6 Int64

                          b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. The indices of the output SparseTensor.
                          • output_values: 1-D. The values of the output SparseTensor.

                          Returns the element-wise min of two SparseTensors.

                          Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

                          sparseSparseMinimum'

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

                          -> Tensor v'2 t

                          a_values: 1-D. N non-empty values corresponding to a_indices.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor v'4 Int64

                          b_indices: counterpart to a_indices for the other operand.

                          -> Tensor v'5 t

                          b_values: counterpart to a_values for the other operand; must be of the same dtype.

                          -> Tensor v'6 Int64

                          b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. The indices of the output SparseTensor.
                          • output_values: 1-D. The values of the output SparseTensor.

                          sparseSplit

                          Arguments

                          :: TensorType t 
                          => Int64

                          num_split: The number of ways to split.

                          -> Tensor v'1 Int64

                          split_dim: 0-D. The dimension along which to split. Must be in the range - `[0, rank(shape))`.

                          -> Tensor v'2 Int64

                          indices: 2-D tensor represents the indices of the sparse tensor.

                          -> Tensor v'3 t

                          values: 1-D tensor represents the values of the sparse tensor.

                          -> Tensor v'4 Int64

                          shape: 1-D. tensor represents the shape of the sparse tensor. + given row.

                          Inputs are the logits, not probabilities.

                          sparseSoftmaxCrossEntropyWithLogits' Source #

                          Arguments

                          :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) 
                          => OpParams 
                          -> Tensor v'1 t

                          features: batch_size x num_classes matrix

                          -> Tensor v'2 tlabels

                          labels: batch_size vector with values in [0, num_classes). + This is the label for the given minibatch entry.

                          -> (Tensor Build t, Tensor Build t)

                          (loss, backprop)

                          • loss: Per example loss (batch_size vector).
                          • backprop: backpropagated gradients (batch_size x num_classes matrix).

                          sparseSparseMaximum Source #

                          Arguments

                          :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                          => Tensor v'1 Int64

                          a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, in the canonical lexicographic ordering.

                          -> Tensor v'2 t

                          a_values: 1-D. N non-empty values corresponding to a_indices.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor v'4 Int64

                          b_indices: counterpart to a_indices for the other operand.

                          -> Tensor v'5 t

                          b_values: counterpart to a_values for the other operand; must be of the same dtype.

                          -> Tensor v'6 Int64

                          b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. The indices of the output SparseTensor.
                          • output_values: 1-D. The values of the output SparseTensor.

                          Returns the element-wise max of two SparseTensors.

                          Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

                          sparseSparseMaximum' Source #

                          Arguments

                          :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, in the canonical lexicographic ordering.

                          -> Tensor v'2 t

                          a_values: 1-D. N non-empty values corresponding to a_indices.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor v'4 Int64

                          b_indices: counterpart to a_indices for the other operand.

                          -> Tensor v'5 t

                          b_values: counterpart to a_values for the other operand; must be of the same dtype.

                          -> Tensor v'6 Int64

                          b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. The indices of the output SparseTensor.
                          • output_values: 1-D. The values of the output SparseTensor.

                          sparseSparseMinimum Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                          => Tensor v'1 Int64

                          a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, in the canonical lexicographic ordering.

                          -> Tensor v'2 t

                          a_values: 1-D. N non-empty values corresponding to a_indices.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor v'4 Int64

                          b_indices: counterpart to a_indices for the other operand.

                          -> Tensor v'5 t

                          b_values: counterpart to a_values for the other operand; must be of the same dtype.

                          -> Tensor v'6 Int64

                          b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. The indices of the output SparseTensor.
                          • output_values: 1-D. The values of the output SparseTensor.

                          Returns the element-wise min of two SparseTensors.

                          Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

                          sparseSparseMinimum' Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + SparseTensor, in the canonical lexicographic ordering.

                          -> Tensor v'2 t

                          a_values: 1-D. N non-empty values corresponding to a_indices.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. Shape of the input SparseTensor.

                          -> Tensor v'4 Int64

                          b_indices: counterpart to a_indices for the other operand.

                          -> Tensor v'5 t

                          b_values: counterpart to a_values for the other operand; must be of the same dtype.

                          -> Tensor v'6 Int64

                          b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

                          -> (Tensor Build Int64, Tensor Build t)

                          (output_indices, output_values)

                          • output_indices: 2-D. The indices of the output SparseTensor.
                          • output_values: 1-D. The values of the output SparseTensor.

                          sparseSplit Source #

                          Arguments

                          :: TensorType t 
                          => Int64

                          num_split: The number of ways to split.

                          -> Tensor v'1 Int64

                          split_dim: 0-D. The dimension along which to split. Must be in the range + `[0, rank(shape))`.

                          -> Tensor v'2 Int64

                          indices: 2-D tensor represents the indices of the sparse tensor.

                          -> Tensor v'3 t

                          values: 1-D tensor represents the values of the sparse tensor.

                          -> Tensor v'4 Int64

                          shape: 1-D. tensor represents the shape of the sparse tensor. output indices: A list of 1-D tensors represents the indices of the output - sparse tensors.

                          -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])

                          (output_indices, output_values, output_shape)

                          • output_indices
                          • output_values: A list of 1-D tensors represents the values of the output sparse + sparse tensors.

                          -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])

                          (output_indices, output_values, output_shape)

                          • output_indices
                          • output_values: A list of 1-D tensors represents the values of the output sparse tensors.
                          • output_shape: A list of 1-D tensors represents the shape of the output sparse tensors.

                          Split a SparseTensor into num_split tensors along one dimension.

                          If the `shape[split_dim]` is not an integer multiple of num_split. Slices `[0 : shape[split_dim] % num_split]` gets one extra dimension. @@ -3151,21 +3674,21 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [ a ] [b c ]

                          output_tensor[1] = shape = [2, 3] [ d e ] - [ ]

                          sparseSplit'

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Int64

                          num_split: The number of ways to split.

                          -> Tensor v'1 Int64

                          split_dim: 0-D. The dimension along which to split. Must be in the range - `[0, rank(shape))`.

                          -> Tensor v'2 Int64

                          indices: 2-D tensor represents the indices of the sparse tensor.

                          -> Tensor v'3 t

                          values: 1-D tensor represents the values of the sparse tensor.

                          -> Tensor v'4 Int64

                          shape: 1-D. tensor represents the shape of the sparse tensor. + [ ]

                          sparseSplit' Source #

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Int64

                          num_split: The number of ways to split.

                          -> Tensor v'1 Int64

                          split_dim: 0-D. The dimension along which to split. Must be in the range + `[0, rank(shape))`.

                          -> Tensor v'2 Int64

                          indices: 2-D tensor represents the indices of the sparse tensor.

                          -> Tensor v'3 t

                          values: 1-D tensor represents the values of the sparse tensor.

                          -> Tensor v'4 Int64

                          shape: 1-D. tensor represents the shape of the sparse tensor. output indices: A list of 1-D tensors represents the indices of the output - sparse tensors.

                          -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])

                          (output_indices, output_values, output_shape)

                          • output_indices
                          • output_values: A list of 1-D tensors represents the values of the output sparse + sparse tensors.

                          -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])

                          (output_indices, output_values, output_shape)

                          • output_indices
                          • output_values: A list of 1-D tensors represents the values of the output sparse tensors.
                          • output_shape: A list of 1-D tensors represents the shape of the output sparse - tensors.

                          sparseTensorDenseAdd

                          Arguments

                          :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                          => Tensor v'1 tindices

                          a_indices: 2-D. The indices of the SparseTensor, with shape `[nnz, ndims]`.

                          -> Tensor v'2 t

                          a_values: 1-D. The values of the SparseTensor, with shape `[nnz]`.

                          -> Tensor v'3 tindices

                          a_shape: 1-D. The shape of the SparseTensor, with shape `[ndims]`.

                          -> Tensor v'4 t

                          b: ndims-D Tensor. With shape a_shape.

                          -> Tensor Build t

                          output

                          Adds up a SparseTensor and a dense Tensor, producing a dense Tensor.

                          This Op does not require a_indices be sorted in standard lexicographic order.

                          sparseTensorDenseAdd'

                          Arguments

                          :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                          => OpParams 
                          -> Tensor v'1 tindices

                          a_indices: 2-D. The indices of the SparseTensor, with shape `[nnz, ndims]`.

                          -> Tensor v'2 t

                          a_values: 1-D. The values of the SparseTensor, with shape `[nnz]`.

                          -> Tensor v'3 tindices

                          a_shape: 1-D. The shape of the SparseTensor, with shape `[ndims]`.

                          -> Tensor v'4 t

                          b: ndims-D Tensor. With shape a_shape.

                          -> Tensor Build t

                          output

                          sparseTensorDenseMatMul

                          Arguments

                          :: TensorType t 
                          => Tensor v'1 Int64

                          a_indices: 2-D. The indices of the SparseTensor, size `[nnz, 2]` Matrix.

                          -> Tensor v'2 t

                          a_values: 1-D. The values of the SparseTensor, size `[nnz]` Vector.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. The shape of the SparseTensor, size `[2]` Vector.

                          -> Tensor v'4 t

                          b: 2-D. A dense Matrix.

                          -> Tensor Build t

                          product

                          Multiply SparseTensor (of rank 2) A by dense matrix B.

                          No validity checking is performed on the indices of A. However, the following + tensors.

                          sparseTensorDenseAdd Source #

                          Arguments

                          :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                          => Tensor v'1 tindices

                          a_indices: 2-D. The indices of the SparseTensor, with shape `[nnz, ndims]`.

                          -> Tensor v'2 t

                          a_values: 1-D. The values of the SparseTensor, with shape `[nnz]`.

                          -> Tensor v'3 tindices

                          a_shape: 1-D. The shape of the SparseTensor, with shape `[ndims]`.

                          -> Tensor v'4 t

                          b: ndims-D Tensor. With shape a_shape.

                          -> Tensor Build t

                          output

                          Adds up a SparseTensor and a dense Tensor, producing a dense Tensor.

                          This Op does not require a_indices be sorted in standard lexicographic order.

                          sparseTensorDenseAdd' Source #

                          Arguments

                          :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                          => OpParams 
                          -> Tensor v'1 tindices

                          a_indices: 2-D. The indices of the SparseTensor, with shape `[nnz, ndims]`.

                          -> Tensor v'2 t

                          a_values: 1-D. The values of the SparseTensor, with shape `[nnz]`.

                          -> Tensor v'3 tindices

                          a_shape: 1-D. The shape of the SparseTensor, with shape `[ndims]`.

                          -> Tensor v'4 t

                          b: ndims-D Tensor. With shape a_shape.

                          -> Tensor Build t

                          output

                          sparseTensorDenseMatMul Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] tindices) 
                          => Tensor v'1 tindices

                          a_indices: 2-D. The indices of the SparseTensor, size `[nnz, 2]` Matrix.

                          -> Tensor v'2 t

                          a_values: 1-D. The values of the SparseTensor, size `[nnz]` Vector.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. The shape of the SparseTensor, size `[2]` Vector.

                          -> Tensor v'4 t

                          b: 2-D. A dense Matrix.

                          -> Tensor Build t

                          product

                          Multiply SparseTensor (of rank 2) A by dense matrix B.

                          No validity checking is performed on the indices of A. However, the following input format is recommended for optimal behavior:

                          if adjoint_a == false: A should be sorted in lexicographically increasing order. Use SparseReorder if you're not sure. if adjoint_a == true: A should be sorted in order of increasing dimension 1 (i.e., "column major" - order instead of "row major" order).

                          sparseTensorDenseMatMul'

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          a_indices: 2-D. The indices of the SparseTensor, size `[nnz, 2]` Matrix.

                          -> Tensor v'2 t

                          a_values: 1-D. The values of the SparseTensor, size `[nnz]` Vector.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. The shape of the SparseTensor, size `[2]` Vector.

                          -> Tensor v'4 t

                          b: 2-D. A dense Matrix.

                          -> Tensor Build t

                          product

                          sparseToDense

                          Arguments

                          :: (TensorType t, OneOf `[Int32, Int64]` tindices) 
                          => Tensor v'1 tindices

                          sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete - index where `sparse_values[i]` will be placed.

                          -> Tensor v'2 tindices

                          output_shape: 1-D. Shape of the dense output tensor.

                          -> Tensor v'3 t

                          sparse_values: 1-D. Values corresponding to each row of sparse_indices, - or a scalar value to be used for all sparse indices.

                          -> Tensor v'4 t

                          default_value: Scalar value to set for indices not specified in - sparse_indices.

                          -> Tensor Build t

                          dense: Dense output tensor of shape output_shape.

                          Converts a sparse representation into a dense tensor.

                          Builds an array dense with shape output_shape such that

                          ```prettyprint + order instead of "row major" order).

                          sparseTensorDenseMatMul' Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] tindices) 
                          => OpParams 
                          -> Tensor v'1 tindices

                          a_indices: 2-D. The indices of the SparseTensor, size `[nnz, 2]` Matrix.

                          -> Tensor v'2 t

                          a_values: 1-D. The values of the SparseTensor, size `[nnz]` Vector.

                          -> Tensor v'3 Int64

                          a_shape: 1-D. The shape of the SparseTensor, size `[2]` Vector.

                          -> Tensor v'4 t

                          b: 2-D. A dense Matrix.

                          -> Tensor Build t

                          product

                          sparseTensorSliceDataset Source #

                          Arguments

                          :: (MonadBuild m', TensorType tvalues) 
                          => Tensor v'1 Int64

                          indices

                          -> Tensor v'2 tvalues

                          values

                          -> Tensor v'3 Int64

                          dense_shape

                          -> m' (Tensor Value ResourceHandle)

                          handle

                          Creates a dataset that splits a SparseTensor into elements row-wise.

                          sparseTensorSliceDataset' Source #

                          Arguments

                          :: (MonadBuild m', TensorType tvalues) 
                          => OpParams 
                          -> Tensor v'1 Int64

                          indices

                          -> Tensor v'2 tvalues

                          values

                          -> Tensor v'3 Int64

                          dense_shape

                          -> m' (Tensor Value ResourceHandle)

                          handle

                          sparseToDense Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] tindices) 
                          => Tensor v'1 tindices

                          sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete + index where `sparse_values[i]` will be placed.

                          -> Tensor v'2 tindices

                          output_shape: 1-D. Shape of the dense output tensor.

                          -> Tensor v'3 t

                          sparse_values: 1-D. Values corresponding to each row of sparse_indices, + or a scalar value to be used for all sparse indices.

                          -> Tensor v'4 t

                          default_value: Scalar value to set for indices not specified in + sparse_indices.

                          -> Tensor Build t

                          dense: Dense output tensor of shape output_shape.

                          Converts a sparse representation into a dense tensor.

                          Builds an array dense with shape output_shape such that

                          ``` # If sparse_indices is scalar dense[i] = (i == sparse_indices ? sparse_values : default_value)

                          # If sparse_indices is a vector, then for each i dense[sparse_indices[i]] = sparse_values[i]

                          # If sparse_indices is an n by d matrix, then for each i in [0, n) @@ -3173,74 +3696,83 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core ```

                          All other values in dense are set to default_value. If sparse_values is a scalar, all sparse indices are set to this single value.

                          Indices should be sorted in lexicographic order, and indices must not contain any repeats. If validate_indices is true, these properties - are checked during execution.

                          sparseToDense'

                          Arguments

                          :: (TensorType t, OneOf `[Int32, Int64]` tindices) 
                          => OpParams 
                          -> Tensor v'1 tindices

                          sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete - index where `sparse_values[i]` will be placed.

                          -> Tensor v'2 tindices

                          output_shape: 1-D. Shape of the dense output tensor.

                          -> Tensor v'3 t

                          sparse_values: 1-D. Values corresponding to each row of sparse_indices, - or a scalar value to be used for all sparse indices.

                          -> Tensor v'4 t

                          default_value: Scalar value to set for indices not specified in - sparse_indices.

                          -> Tensor Build t

                          dense: Dense output tensor of shape output_shape.

                          sparseToSparseSetOperation

                          Arguments

                          :: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
                          => Tensor v'1 Int64

                          set1_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major - order.

                          -> Tensor v'2 t

                          set1_values: 1D Tensor, values of a SparseTensor. Must be in row-major - order.

                          -> Tensor v'3 Int64

                          set1_shape: 1D Tensor, shape of a SparseTensor. `set1_shape[0...n-1]` must + are checked during execution.

                          sparseToDense' Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] tindices) 
                          => OpParams 
                          -> Tensor v'1 tindices

                          sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete + index where `sparse_values[i]` will be placed.

                          -> Tensor v'2 tindices

                          output_shape: 1-D. Shape of the dense output tensor.

                          -> Tensor v'3 t

                          sparse_values: 1-D. Values corresponding to each row of sparse_indices, + or a scalar value to be used for all sparse indices.

                          -> Tensor v'4 t

                          default_value: Scalar value to set for indices not specified in + sparse_indices.

                          -> Tensor Build t

                          dense: Dense output tensor of shape output_shape.

                          sparseToSparseSetOperation Source #

                          Arguments

                          :: OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t 
                          => Tensor v'1 Int64

                          set1_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major + order.

                          -> Tensor v'2 t

                          set1_values: 1D Tensor, values of a SparseTensor. Must be in row-major + order.

                          -> Tensor v'3 Int64

                          set1_shape: 1D Tensor, shape of a SparseTensor. `set1_shape[0...n-1]` must be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the - max set size across `0...n-1` dimensions.

                          -> Tensor v'4 Int64

                          set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major - order.

                          -> Tensor v'5 t

                          set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major - order.

                          -> Tensor v'6 Int64

                          set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must + max set size across `0...n-1` dimensions.

                          -> Tensor v'4 Int64

                          set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major + order.

                          -> Tensor v'5 t

                          set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major + order.

                          -> Tensor v'6 Int64

                          set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the - max set size across `0...n-1` dimensions.

                          -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                          (result_indices, result_values, result_shape)

                          • result_indices: 2D indices of a SparseTensor.
                          • result_values: 1D values of a SparseTensor.
                          • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + max set size across `0...n-1` dimensions.

                          -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                          (result_indices, result_values, result_shape)

                          • result_indices: 2D indices of a SparseTensor.
                          • result_values: 1D values of a SparseTensor.
                          • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` - is the max result set size across all `0...n-1` dimensions.

                          Applies set operation along last dimension of 2 SparseTensor inputs.

                          See SetOperationOp::SetOperationFromContext for values of set_operation.

                          If validate_indices is True, SparseToSparseSetOperation validates the + is the max result set size across all `0...n-1` dimensions.

                          Applies set operation along last dimension of 2 SparseTensor inputs.

                          See SetOperationOp::SetOperationFromContext for values of set_operation.

                          If validate_indices is True, SparseToSparseSetOperation validates the order and range of set1 and set2 indices.

                          Input set1 is a SparseTensor represented by set1_indices, set1_values, and set1_shape. For set1 ranked n, 1st `n-1` dimensions must be the same as set2. Dimension n contains values in a set, duplicates are allowed but ignored.

                          Input set2 is a SparseTensor represented by set2_indices, set2_values, and set2_shape. For set2 ranked n, 1st `n-1` dimensions must be the same as set1. Dimension n contains values in a set, duplicates are allowed but - ignored.

                          If validate_indices is True, this op validates the order and range of set1 + ignored.

                          If validate_indices is True, this op validates the order and range of set1 and set2 indices.

                          Output result is a SparseTensor represented by result_indices, result_values, and result_shape. For set1 and set2 ranked n, this has rank n and the same 1st `n-1` dimensions as set1 and set2. The nth dimension contains the result of set_operation applied to the corresponding - `[0...n-1]` dimension of set.

                          sparseToSparseSetOperation'

                          Arguments

                          :: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          set1_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major - order.

                          -> Tensor v'2 t

                          set1_values: 1D Tensor, values of a SparseTensor. Must be in row-major - order.

                          -> Tensor v'3 Int64

                          set1_shape: 1D Tensor, shape of a SparseTensor. `set1_shape[0...n-1]` must + `[0...n-1]` dimension of set.

                          sparseToSparseSetOperation' Source #

                          Arguments

                          :: OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t 
                          => OpParams 
                          -> Tensor v'1 Int64

                          set1_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major + order.

                          -> Tensor v'2 t

                          set1_values: 1D Tensor, values of a SparseTensor. Must be in row-major + order.

                          -> Tensor v'3 Int64

                          set1_shape: 1D Tensor, shape of a SparseTensor. `set1_shape[0...n-1]` must be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the - max set size across `0...n-1` dimensions.

                          -> Tensor v'4 Int64

                          set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major - order.

                          -> Tensor v'5 t

                          set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major - order.

                          -> Tensor v'6 Int64

                          set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must + max set size across `0...n-1` dimensions.

                          -> Tensor v'4 Int64

                          set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major + order.

                          -> Tensor v'5 t

                          set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major + order.

                          -> Tensor v'6 Int64

                          set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the - max set size across `0...n-1` dimensions.

                          -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                          (result_indices, result_values, result_shape)

                          • result_indices: 2D indices of a SparseTensor.
                          • result_values: 1D values of a SparseTensor.
                          • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + max set size across `0...n-1` dimensions.

                          -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

                          (result_indices, result_values, result_shape)

                          • result_indices: 2D indices of a SparseTensor.
                          • result_values: 1D values of a SparseTensor.
                          • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` - is the max result set size across all `0...n-1` dimensions.

                          split

                          Arguments

                          :: TensorType t 
                          => Int64

                          num_split: The number of ways to split. Must evenly divide - `value.shape[split_dim]`.

                          -> Tensor v'1 Int32

                          split_dim: 0-D. The dimension along which to split. Must be in the range - `[0, rank(value))`.

                          -> Tensor v'2 t

                          value: The tensor to split.

                          -> [Tensor Build t]

                          output: They are identically shaped tensors, whose shape matches that of value + is the max result set size across all `0...n-1` dimensions.

                          split Source #

                          Arguments

                          :: TensorType t 
                          => Int64

                          num_split: The number of ways to split. Must evenly divide + `value.shape[split_dim]`.

                          -> Tensor v'1 Int32

                          split_dim: 0-D. The dimension along which to split. Must be in the range + `[-rank(value), rank(value))`.

                          -> Tensor v'2 t

                          value: The tensor to split.

                          -> [Tensor Build t]

                          output: They are identically shaped tensors, whose shape matches that of value except along split_dim, where their sizes are - `values.shape[split_dim] / num_split`.

                          Splits a tensor into num_split tensors along one dimension.

                          split'

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Int64

                          num_split: The number of ways to split. Must evenly divide - `value.shape[split_dim]`.

                          -> Tensor v'1 Int32

                          split_dim: 0-D. The dimension along which to split. Must be in the range - `[0, rank(value))`.

                          -> Tensor v'2 t

                          value: The tensor to split.

                          -> [Tensor Build t]

                          output: They are identically shaped tensors, whose shape matches that of value + `values.shape[split_dim] / num_split`.

                          Splits a tensor into num_split tensors along one dimension.

                          split' Source #

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Int64

                          num_split: The number of ways to split. Must evenly divide + `value.shape[split_dim]`.

                          -> Tensor v'1 Int32

                          split_dim: 0-D. The dimension along which to split. Must be in the range + `[-rank(value), rank(value))`.

                          -> Tensor v'2 t

                          value: The tensor to split.

                          -> [Tensor Build t]

                          output: They are identically shaped tensors, whose shape matches that of value except along split_dim, where their sizes are - `values.shape[split_dim] / num_split`.

                          splitV

                          Arguments

                          :: (TensorType t, OneOf `[Int32, Int64]` tlen) 
                          => Int64

                          num_split

                          -> Tensor v'1 t

                          value: The tensor to split.

                          -> Tensor v'2 tlen

                          size_splits: list containing the sizes of each output tensor along the split + `values.shape[split_dim] / num_split`.

                          splitV Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] tlen) 
                          => Int64

                          num_split

                          -> Tensor v'1 t

                          value: The tensor to split.

                          -> Tensor v'2 tlen

                          size_splits: list containing the sizes of each output tensor along the split dimension. Must sum to the dimension of value along split_dim. - Can contain one -1 indicating that dimension is to be inferred.

                          -> Tensor v'3 Int32

                          split_dim: 0-D. The dimension along which to split. Must be in the range - `[0, rank(value))`.

                          -> [Tensor Build t]

                          output: Tensors whose shape matches that of value + Can contain one -1 indicating that dimension is to be inferred.

                          -> Tensor v'3 Int32

                          split_dim: 0-D. The dimension along which to split. Must be in the range + `[-rank(value), rank(value))`.

                          -> [Tensor Build t]

                          output: Tensors whose shape matches that of value except along split_dim, where their sizes are - `size_splits[i]`.

                          Splits a tensor into num_split tensors along one dimension.

                          splitV'

                          Arguments

                          :: (TensorType t, OneOf `[Int32, Int64]` tlen) 
                          => OpParams 
                          -> Int64

                          num_split

                          -> Tensor v'1 t

                          value: The tensor to split.

                          -> Tensor v'2 tlen

                          size_splits: list containing the sizes of each output tensor along the split + `size_splits[i]`.

                          Splits a tensor into num_split tensors along one dimension.

                          splitV' Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] tlen) 
                          => OpParams 
                          -> Int64

                          num_split

                          -> Tensor v'1 t

                          value: The tensor to split.

                          -> Tensor v'2 tlen

                          size_splits: list containing the sizes of each output tensor along the split dimension. Must sum to the dimension of value along split_dim. - Can contain one -1 indicating that dimension is to be inferred.

                          -> Tensor v'3 Int32

                          split_dim: 0-D. The dimension along which to split. Must be in the range - `[0, rank(value))`.

                          -> [Tensor Build t]

                          output: Tensors whose shape matches that of value + Can contain one -1 indicating that dimension is to be inferred.

                          -> Tensor v'3 Int32

                          split_dim: 0-D. The dimension along which to split. Must be in the range + `[-rank(value), rank(value))`.

                          -> [Tensor Build t]

                          output: Tensors whose shape matches that of value except along split_dim, where their sizes are - `size_splits[i]`.

                          sqrt

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                          => Tensor v'1 t

                          x

                          -> Tensor Build t

                          y

                          Computes square root of x element-wise.

                          I.e., \(y = sqrt{x} = x^{1/2}\).

                          sqrt'

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                          => OpParams 
                          -> Tensor v'1 t

                          x

                          -> Tensor Build t

                          y

                          sqrtGrad

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                          => Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          Computes the gradient for the sqrt of x wrt its input.

                          Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and dy - is the corresponding input gradient.

                          sqrtGrad'

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                          => OpParams 
                          -> Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          square

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                          => Tensor v'1 t

                          x

                          -> Tensor Build t

                          y

                          Computes square of x element-wise.

                          I.e., \(y = x * x = x^2\).

                          squaredDifference

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                          => Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          Returns (x - y)(x - y) element-wise.

                          • NOTE*: SquaredDifference supports broadcasting. More about broadcasting - here

                          squeeze

                          Arguments

                          :: TensorType t 
                          => Tensor v'1 t

                          input: The input to squeeze.

                          -> Tensor Build t

                          output: Contains the same data as input, but has one or more dimensions of + `size_splits[i]`.

                          sqrt Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                          => Tensor v'1 t

                          x

                          -> Tensor Build t

                          y

                          Computes square root of x element-wise.

                          I.e., \(y = sqrt{x} = x^{1/2}\).

                          sqrtGrad Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                          => Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          Computes the gradient for the sqrt of x wrt its input.

                          Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and dy + is the corresponding input gradient.

                          sqrtGrad' Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                          => OpParams 
                          -> Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          square Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                          => Tensor v'1 t

                          x

                          -> Tensor Build t

                          y

                          Computes square of x element-wise.

                          I.e., \(y = x * x = x^2\).

                          squaredDifference Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                          => Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          Returns (x - y)(x - y) element-wise.

                          • NOTE*: SquaredDifference supports broadcasting. More about broadcasting + here

                          squeeze Source #

                          Arguments

                          :: TensorType t 
                          => Tensor v'1 t

                          input: The input to squeeze.

                          -> Tensor Build t

                          output: Contains the same data as input, but has one or more dimensions of size 1 removed.

                          Removes dimensions of size 1 from the shape of a tensor.

                          Given a tensor input, this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying - squeeze_dims.

                          For example:

                          ```prettyprint + squeeze_dims.

                          For example:

                          ``` # t is a tensor of shape [1, 2, 1, 3, 1, 1] shape(squeeze(t)) ==> [2, 3] - ```

                          Or, to remove specific size 1 dimensions:

                          ```prettyprint + ```

                          Or, to remove specific size 1 dimensions:

                          ``` # t is a tensor of shape [1, 2, 1, 3, 1, 1] shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] - ```

                          squeeze'

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Tensor v'1 t

                          input: The input to squeeze.

                          -> Tensor Build t

                          output: Contains the same data as input, but has one or more dimensions of - size 1 removed.

                          stack

                          Arguments

                          :: MonadBuild m' 
                          => DataType

                          elem_type: The type of the elements on the stack.

                          -> m' (Tensor Ref ByteString)

                          handle: The handle to the stack.

                          A stack that produces elements in first-in last-out order.

                          stack'

                          Arguments

                          :: MonadBuild m' 
                          => OpParams 
                          -> DataType

                          elem_type: The type of the elements on the stack.

                          -> m' (Tensor Ref ByteString)

                          handle: The handle to the stack.

                          stackClose

                          Arguments

                          :: MonadBuild m' 
                          => Tensor Ref ByteString

                          handle: The handle to a stack.

                          -> m' ControlNode 

                          Delete the stack from its resource container.

                          stackClose'

                          Arguments

                          :: MonadBuild m' 
                          => OpParams 
                          -> Tensor Ref ByteString

                          handle: The handle to a stack.

                          -> m' ControlNode 

                          stackPop

                          Arguments

                          :: (MonadBuild m', TensorType elem_type) 
                          => Tensor Ref ByteString

                          handle: The handle to a stack.

                          -> m' (Tensor Value elem_type)

                          elem: The tensor that is popped from the top of the stack.

                          Pop the element at the top of the stack.

                          stackPop'

                          Arguments

                          :: (MonadBuild m', TensorType elem_type) 
                          => OpParams 
                          -> Tensor Ref ByteString

                          handle: The handle to a stack.

                          -> m' (Tensor Value elem_type)

                          elem: The tensor that is popped from the top of the stack.

                          stackPush

                          Arguments

                          :: (MonadBuild m', TensorType t) 
                          => Tensor Ref ByteString

                          handle: The handle to a stack.

                          -> Tensor v'2 t

                          elem: The tensor to be pushed onto the stack.

                          -> m' (Tensor Value t)

                          output: The same tensor as the input elem.

                          Push an element onto the stack.

                          stackPush'

                          Arguments

                          :: (MonadBuild m', TensorType t) 
                          => OpParams 
                          -> Tensor Ref ByteString

                          handle: The handle to a stack.

                          -> Tensor v'2 t

                          elem: The tensor to be pushed onto the stack.

                          -> m' (Tensor Value t)

                          output: The same tensor as the input elem.

                          stage

                          Arguments

                          :: (MonadBuild m', TensorTypes dtypes) 
                          => TensorList v'1 dtypes

                          values: a list of tensors

                          -> m' ControlNode 

                          Stage values similar to a lightweight Enqueue. The basic functionality of this

                          Op is similar to a queue with many fewer capabilities and options. This Op is - optimized for performance.

                          stage'

                          Arguments

                          :: (MonadBuild m', TensorTypes dtypes) 
                          => OpParams 
                          -> TensorList v'1 dtypes

                          values: a list of tensors

                          -> m' ControlNode 

                          stopGradient

                          Arguments

                          :: TensorType t 
                          => Tensor v'1 t

                          input

                          -> Tensor Build t

                          output

                          Stops gradient computation.

                          When executed in a graph, this op outputs its input tensor as-is.

                          When building ops to compute gradients, this op prevents the contribution of + ```

                          squeeze' Source #

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Tensor v'1 t

                          input: The input to squeeze.

                          -> Tensor Build t

                          output: Contains the same data as input, but has one or more dimensions of + size 1 removed.

                          stack Source #

                          Arguments

                          :: MonadBuild m' 
                          => DataType

                          elem_type

                          -> m' (Tensor Ref ByteString)

                          handle

                          Deprecated, use StackV2.

                          stack' Source #

                          Arguments

                          :: MonadBuild m' 
                          => OpParams 
                          -> DataType

                          elem_type

                          -> m' (Tensor Ref ByteString)

                          handle

                          stackClose Source #

                          Arguments

                          :: MonadBuild m' 
                          => Tensor Ref ByteString

                          handle

                          -> m' ControlNode 

                          Deprecated, use StackCloseV2.

                          stackCloseV2 Source #

                          Arguments

                          :: MonadBuild m' 
                          => Tensor v'1 ResourceHandle

                          handle: The handle to a stack.

                          -> m' ControlNode 

                          Delete the stack from its resource container.

                          stackCloseV2' Source #

                          Arguments

                          :: MonadBuild m' 
                          => OpParams 
                          -> Tensor v'1 ResourceHandle

                          handle: The handle to a stack.

                          -> m' ControlNode 

                          stackPop Source #

                          Arguments

                          :: (MonadBuild m', TensorType elem_type) 
                          => Tensor Ref ByteString

                          handle

                          -> m' (Tensor Value elem_type)

                          elem

                          Deprecated, use StackPopV2.

                          stackPop' Source #

                          Arguments

                          :: (MonadBuild m', TensorType elem_type) 
                          => OpParams 
                          -> Tensor Ref ByteString

                          handle

                          -> m' (Tensor Value elem_type)

                          elem

                          stackPopV2 Source #

                          Arguments

                          :: (MonadBuild m', TensorType elem_type) 
                          => Tensor v'1 ResourceHandle

                          handle: The handle to a stack.

                          -> m' (Tensor Value elem_type)

                          elem: The tensor that is popped from the top of the stack.

                          Pop the element at the top of the stack.

                          stackPopV2' Source #

                          Arguments

                          :: (MonadBuild m', TensorType elem_type) 
                          => OpParams 
                          -> Tensor v'1 ResourceHandle

                          handle: The handle to a stack.

                          -> m' (Tensor Value elem_type)

                          elem: The tensor that is popped from the top of the stack.

                          stackPush Source #

                          Arguments

                          :: (MonadBuild m', TensorType t) 
                          => Tensor Ref ByteString

                          handle

                          -> Tensor v'2 t

                          elem

                          -> m' (Tensor Value t)

                          output

                          Deprecated, use StackPushV2.

                          stackPush' Source #

                          Arguments

                          :: (MonadBuild m', TensorType t) 
                          => OpParams 
                          -> Tensor Ref ByteString

                          handle

                          -> Tensor v'2 t

                          elem

                          -> m' (Tensor Value t)

                          output

                          stackPushV2 Source #

                          Arguments

                          :: (MonadBuild m', TensorType t) 
                          => Tensor v'1 ResourceHandle

                          handle: The handle to a stack.

                          -> Tensor v'2 t

                          elem: The tensor to be pushed onto the stack.

                          -> m' (Tensor Value t)

                          output: The same tensor as the input elem.

                          Push an element onto the stack.

                          stackPushV2' Source #

                          Arguments

                          :: (MonadBuild m', TensorType t) 
                          => OpParams 
                          -> Tensor v'1 ResourceHandle

                          handle: The handle to a stack.

                          -> Tensor v'2 t

                          elem: The tensor to be pushed onto the stack.

                          -> m' (Tensor Value t)

                          output: The same tensor as the input elem.

                          stackV2 Source #

                          Arguments

                          :: MonadBuild m' 
                          => DataType

                          elem_type: The type of the elements on the stack.

                          -> Tensor v'1 Int32

                          max_size: The maximum size of the stack if non-negative. If negative, the stack + size is unlimited.

                          -> m' (Tensor Value ResourceHandle)

                          handle: The handle to the stack.

                          A stack that produces elements in first-in last-out order.

                          stackV2' Source #

                          Arguments

                          :: MonadBuild m' 
                          => OpParams 
                          -> DataType

                          elem_type: The type of the elements on the stack.

                          -> Tensor v'1 Int32

                          max_size: The maximum size of the stack if non-negative. If negative, the stack + size is unlimited.

                          -> m' (Tensor Value ResourceHandle)

                          handle: The handle to the stack.

                          stage Source #

                          Arguments

                          :: (MonadBuild m', TensorTypes dtypes) 
                          => TensorList v'1 dtypes

                          values: a list of tensors + dtypes A list of data types that inserted values should adhere to.

                          -> m' ControlNode 

                          Stage values similar to a lightweight Enqueue.

                          The basic functionality of this Op is similar to a queue with many + fewer capabilities and options. This Op is optimized for performance.

                          stage' Source #

                          Arguments

                          :: (MonadBuild m', TensorTypes dtypes) 
                          => OpParams 
                          -> TensorList v'1 dtypes

                          values: a list of tensors + dtypes A list of data types that inserted values should adhere to.

                          -> m' ControlNode 

                          stageClear Source #

                          Arguments

                          :: MonadBuild m' 
                          => [DataType]

                          dtypes

                          -> m' ControlNode 

                          Op removes all elements in the underlying container.

                          stageClear' Source #

                          Arguments

                          :: MonadBuild m' 
                          => OpParams 
                          -> [DataType]

                          dtypes

                          -> m' ControlNode 

                          stagePeek Source #

                          Arguments

                          :: (MonadBuild m', TensorTypes dtypes) 
                          => Tensor v'1 Int32

                          index

                          -> m' (TensorList Value dtypes)

                          values

                          Op peeks at the values at the specified index. If the

                          underlying container does not contain sufficient elements + this op will block until it does. This Op is optimized for + performance.

                          stagePeek' Source #

                          Arguments

                          :: (MonadBuild m', TensorTypes dtypes) 
                          => OpParams 
                          -> Tensor v'1 Int32

                          index

                          -> m' (TensorList Value dtypes)

                          values

                          stageSize Source #

                          Arguments

                          :: MonadBuild m' 
                          => [DataType]

                          dtypes

                          -> m' (Tensor Value Int32)

                          size

                          Op returns the number of elements in the underlying container.

                          stageSize' Source #

                          Arguments

                          :: MonadBuild m' 
                          => OpParams 
                          -> [DataType]

                          dtypes

                          -> m' (Tensor Value Int32)

                          size

                          statelessRandomNormal Source #

                          Arguments

                          :: (OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                          => Tensor v'1 t

                          shape: The shape of the output tensor.

                          -> Tensor v'2 Int64

                          seed: 2 seeds (shape [2]).

                          -> Tensor Build dtype

                          output: Random values with specified shape.

                          Outputs deterministic pseudorandom values from a normal distribution.

                          The generated values will have mean 0 and standard deviation 1.

                          The outputs are a deterministic function of shape and seed.

                          statelessRandomNormal' Source #

                          Arguments

                          :: (OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                          => OpParams 
                          -> Tensor v'1 t

                          shape: The shape of the output tensor.

                          -> Tensor v'2 Int64

                          seed: 2 seeds (shape [2]).

                          -> Tensor Build dtype

                          output: Random values with specified shape.

                          statelessRandomUniform Source #

                          Arguments

                          :: (OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                          => Tensor v'1 t

                          shape: The shape of the output tensor.

                          -> Tensor v'2 Int64

                          seed: 2 seeds (shape [2]).

                          -> Tensor Build dtype

                          output: Random values with specified shape.

                          Outputs deterministic pseudorandom random values from a uniform distribution.

                          The generated values follow a uniform distribution in the range `[0, 1)`. The + lower bound 0 is included in the range, while the upper bound 1 is excluded.

                          The outputs are a deterministic function of shape and seed.

                          statelessRandomUniform' Source #

                          Arguments

                          :: (OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                          => OpParams 
                          -> Tensor v'1 t

                          shape: The shape of the output tensor.

                          -> Tensor v'2 Int64

                          seed: 2 seeds (shape [2]).

                          -> Tensor Build dtype

                          output: Random values with specified shape.

                          statelessTruncatedNormal Source #

                          Arguments

                          :: (OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                          => Tensor v'1 t

                          shape: The shape of the output tensor.

                          -> Tensor v'2 Int64

                          seed: 2 seeds (shape [2]).

                          -> Tensor Build dtype

                          output: Random values with specified shape.

                          Outputs deterministic pseudorandom values from a truncated normal distribution.

                          The generated values follow a normal distribution with mean 0 and standard + deviation 1, except that values whose magnitude is more than 2 standard + deviations from the mean are dropped and re-picked.

                          The outputs are a deterministic function of shape and seed.

                          statelessTruncatedNormal' Source #

                          Arguments

                          :: (OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                          => OpParams 
                          -> Tensor v'1 t

                          shape: The shape of the output tensor.

                          -> Tensor v'2 Int64

                          seed: 2 seeds (shape [2]).

                          -> Tensor Build dtype

                          output: Random values with specified shape.

                          stopGradient Source #

                          Arguments

                          :: TensorType t 
                          => Tensor v'1 t

                          input

                          -> Tensor Build t

                          output

                          Stops gradient computation.

                          When executed in a graph, this op outputs its input tensor as-is.

                          When building ops to compute gradients, this op prevents the contribution of its inputs to be taken into account. Normally, the gradient generator adds ops to a graph to compute the derivatives of a specified loss by recursively finding out inputs that contributed to its computation. If you insert this op @@ -3250,16 +3782,16 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core through the output of the *E-step*.

                        • Contrastive divergence training of Boltzmann machines where, when differentiating the energy function, the training must not backpropagate through the graph that generated the samples from the model.
                        • Adversarial training, where no backprop should happen through the adversarial - example generation process.
                        • stopGradient'

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Tensor v'1 t

                          input

                          -> Tensor Build t

                          output

                          stridedSlice

                          Arguments

                          :: (TensorType t, OneOf `[Int32, Int64]` index) 
                          => Tensor v'1 t

                          input

                          -> Tensor v'2 index

                          begin: `begin[k]` specifies the offset into the kth range specification. + example generation process.

                          stopGradient' Source #

                          Arguments

                          :: TensorType t 
                          => OpParams 
                          -> Tensor v'1 t

                          input

                          -> Tensor Build t

                          output

                          stridedSlice Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] index) 
                          => Tensor v'1 t

                          input

                          -> Tensor v'2 index

                          begin: `begin[k]` specifies the offset into the kth range specification. The exact dimension this corresponds to will be determined by context. Out-of-bounds values will be silently clamped. If the kth bit of begin_mask then `begin[k]` is ignored and the full range of the appropriate dimension is used instead. Negative values causes indexing - to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.

                          -> Tensor v'3 index

                          end: `end[i]` is like begin with the exception that end_mask is - used to determine full ranges.

                          -> Tensor v'4 index

                          strides: `strides[i]` specifies the increment in the ith specification + to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.

                          -> Tensor v'3 index

                          end: `end[i]` is like begin with the exception that end_mask is + used to determine full ranges.

                          -> Tensor v'4 index

                          strides: `strides[i]` specifies the increment in the ith specification after extracting a given element. Negative indices will reverse the original order. Out or range values are - clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`

                          -> Tensor Build t

                          output

                          Return a strided slice from input.

                          Note, most python users will want to use the Python __getitem__ + clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`

                          -> Tensor Build t

                          output

                          Return a strided slice from input.

                          Note, most python users will want to use the Python __getitem__ or __getitem__ rather than this op directly.

                          The goal of this op is to produce a new tensor with a subset of the elements from the n dimensional input tensor. The subset is chosen using a sequence of m sparse range specifications encoded into the arguments @@ -3289,7 +3821,7 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core shrink_axis_mask.

                          Each conceptual range specification is encoded in the op's argument. This encoding is best understand by considering a non-trivial example. In particular, - `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as

                          ```prettyprint + `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as

                          ``` begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) end = [2, 4, x, x, -3, x] strides = [1, 1, x, x, -1, 1] @@ -3316,28 +3848,28 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core receive 0, 0, and 1, respectively. The appropriate bits in begin_mask and end_mask are also set.

                          • Requirements*: `0 != strides[i] for i in [0, m)` - `ellipsis_mask must be a power of two (only one ellipsis)`

                          stridedSlice'

                          Arguments

                          :: (TensorType t, OneOf `[Int32, Int64]` index) 
                          => OpParams 
                          -> Tensor v'1 t

                          input

                          -> Tensor v'2 index

                          begin: `begin[k]` specifies the offset into the kth range specification. + `ellipsis_mask must be a power of two (only one ellipsis)`

                          stridedSlice' Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] index) 
                          => OpParams 
                          -> Tensor v'1 t

                          input

                          -> Tensor v'2 index

                          begin: `begin[k]` specifies the offset into the kth range specification. The exact dimension this corresponds to will be determined by context. Out-of-bounds values will be silently clamped. If the kth bit of begin_mask then `begin[k]` is ignored and the full range of the appropriate dimension is used instead. Negative values causes indexing - to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.

                          -> Tensor v'3 index

                          end: `end[i]` is like begin with the exception that end_mask is - used to determine full ranges.

                          -> Tensor v'4 index

                          strides: `strides[i]` specifies the increment in the ith specification + to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.

                          -> Tensor v'3 index

                          end: `end[i]` is like begin with the exception that end_mask is + used to determine full ranges.

                          -> Tensor v'4 index

                          strides: `strides[i]` specifies the increment in the ith specification after extracting a given element. Negative indices will reverse the original order. Out or range values are - clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`

                          -> Tensor Build t

                          output

                          stridedSliceAssign

                          Arguments

                          :: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` index) 
                          => Tensor Ref t

                          ref

                          -> Tensor v'2 index

                          begin

                          -> Tensor v'3 index

                          end

                          -> Tensor v'4 index

                          strides

                          -> Tensor v'5 t

                          value

                          -> m' (Tensor Ref t)

                          output_ref

                          Assign value to the sliced l-value reference of ref.

                          The values of value are assigned to the positions in the variable + clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`

                          -> Tensor Build t

                          output

                          stridedSliceAssign Source #

                          Arguments

                          :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) 
                          => Tensor Ref t

                          ref

                          -> Tensor v'2 index

                          begin

                          -> Tensor v'3 index

                          end

                          -> Tensor v'4 index

                          strides

                          -> Tensor v'5 t

                          value

                          -> m' (Tensor Ref t)

                          output_ref

                          Assign value to the sliced l-value reference of ref.

                          The values of value are assigned to the positions in the variable ref that are selected by the slice parameters. The slice parameters - `begin, end, strides, etc. work exactly as in StridedSlice.

                          NOTE this op currently does not support broadcasting and so value's - shape must be exactly the shape produced by the slice of ref.

                          stridedSliceAssign'

                          Arguments

                          :: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` index) 
                          => OpParams 
                          -> Tensor Ref t

                          ref

                          -> Tensor v'2 index

                          begin

                          -> Tensor v'3 index

                          end

                          -> Tensor v'4 index

                          strides

                          -> Tensor v'5 t

                          value

                          -> m' (Tensor Ref t)

                          output_ref

                          stridedSliceGrad

                          Arguments

                          :: (TensorType t, OneOf `[Int32, Int64]` index) 
                          => Tensor v'1 index

                          shape

                          -> Tensor v'2 index

                          begin

                          -> Tensor v'3 index

                          end

                          -> Tensor v'4 index

                          strides

                          -> Tensor v'5 t

                          dy

                          -> Tensor Build t

                          output

                          Returns the gradient of StridedSlice.

                          Since StridedSlice cuts out pieces of its input which is size + `begin, end, strides, etc. work exactly as in StridedSlice.

                          NOTE this op currently does not support broadcasting and so value's + shape must be exactly the shape produced by the slice of ref.

                          stridedSliceAssign' Source #

                          Arguments

                          :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) 
                          => OpParams 
                          -> Tensor Ref t

                          ref

                          -> Tensor v'2 index

                          begin

                          -> Tensor v'3 index

                          end

                          -> Tensor v'4 index

                          strides

                          -> Tensor v'5 t

                          value

                          -> m' (Tensor Ref t)

                          output_ref

                          stridedSliceGrad Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] index) 
                          => Tensor v'1 index

                          shape

                          -> Tensor v'2 index

                          begin

                          -> Tensor v'3 index

                          end

                          -> Tensor v'4 index

                          strides

                          -> Tensor v'5 t

                          dy

                          -> Tensor Build t

                          output

                          Returns the gradient of StridedSlice.

                          Since StridedSlice cuts out pieces of its input which is size shape, its gradient will have the same shape (which is passed here as shape). The gradient will be zero in any element that the slice does not select.

                          Arguments are the same as StridedSliceGrad with the exception that dy is the input gradient to be propagated and shape is the - shape of StridedSlice's input.

                          stridedSliceGrad'

                          Arguments

                          :: (TensorType t, OneOf `[Int32, Int64]` index) 
                          => OpParams 
                          -> Tensor v'1 index

                          shape

                          -> Tensor v'2 index

                          begin

                          -> Tensor v'3 index

                          end

                          -> Tensor v'4 index

                          strides

                          -> Tensor v'5 t

                          dy

                          -> Tensor Build t

                          output

                          stringJoin

                          Arguments

                          :: [Tensor v'1 ByteString]

                          inputs: A list of string tensors. The tensors must all have the same shape, + shape of StridedSlice's input.

                          stridedSliceGrad' Source #

                          Arguments

                          :: (TensorType t, OneOf '[Int32, Int64] index) 
                          => OpParams 
                          -> Tensor v'1 index

                          shape

                          -> Tensor v'2 index

                          begin

                          -> Tensor v'3 index

                          end

                          -> Tensor v'4 index

                          strides

                          -> Tensor v'5 t

                          dy

                          -> Tensor Build t

                          output

                          stringJoin Source #

                          Arguments

                          :: [Tensor v'1 ByteString]

                          inputs: A list of string tensors. The tensors must all have the same shape, or be scalars. Scalars may be mixed in; these will be broadcast to the shape - of non-scalar inputs.

                          -> Tensor Build ByteString

                          output

                          Joins the strings in the given list of string tensors into one tensor;

                          with the given separator (default is an empty separator).

                          stringJoin'

                          Arguments

                          :: OpParams 
                          -> [Tensor v'1 ByteString]

                          inputs: A list of string tensors. The tensors must all have the same shape, + of non-scalar inputs.

                          -> Tensor Build ByteString

                          output

                          Joins the strings in the given list of string tensors into one tensor;

                          with the given separator (default is an empty separator).

                          stringJoin' Source #

                          Arguments

                          :: OpParams 
                          -> [Tensor v'1 ByteString]

                          inputs: A list of string tensors. The tensors must all have the same shape, or be scalars. Scalars may be mixed in; these will be broadcast to the shape - of non-scalar inputs.

                          -> Tensor Build ByteString

                          output

                          stringSplit

                          Arguments

                          :: Tensor v'1 ByteString

                          input: 1-D. Strings to split.

                          -> Tensor v'2 ByteString

                          delimiter: 0-D. Delimiter characters (bytes), or empty string.

                          -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)

                          (indices, values, shape)

                          • indices: A dense matrix of int64 representing the indices of the sparse tensor.
                          • values: A vector of strings corresponding to the splited values.
                          • shape: a length-2 vector of int64 representing the shape of the sparse + of non-scalar inputs.

                          -> Tensor Build ByteString

                          output

                          stringSplit Source #

                          Arguments

                          :: Tensor v'1 ByteString

                          input: 1-D. Strings to split.

                          -> Tensor v'2 ByteString

                          delimiter: 0-D. Delimiter characters (bytes), or empty string.

                          -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)

                          (indices, values, shape)

                          • indices: A dense matrix of int64 representing the indices of the sparse tensor.
                          • values: A vector of strings corresponding to the splited values.
                          • shape: a length-2 vector of int64 representing the shape of the sparse tensor, where the first value is N and the second value is the maximum number of tokens in a single input entry.

                          Split elements of input based on delimiter into a SparseTensor.

                          Let N be the size of source (typically N will be the batch size). Split each element of input based on delimiter and return a SparseTensor @@ -3352,36 +3884,36 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core 1, 1; 1, 2] shape = [2, 3] - values = [hello, world, a, b, c]

                          stringSplit'

                          Arguments

                          :: OpParams 
                          -> Tensor v'1 ByteString

                          input: 1-D. Strings to split.

                          -> Tensor v'2 ByteString

                          delimiter: 0-D. Delimiter characters (bytes), or empty string.

                          -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)

                          (indices, values, shape)

                          • indices: A dense matrix of int64 representing the indices of the sparse tensor.
                          • values: A vector of strings corresponding to the splited values.
                          • shape: a length-2 vector of int64 representing the shape of the sparse + values = [hello, world, a, b, c]

                            stringSplit' Source #

                            Arguments

                            :: OpParams 
                            -> Tensor v'1 ByteString

                            input: 1-D. Strings to split.

                            -> Tensor v'2 ByteString

                            delimiter: 0-D. Delimiter characters (bytes), or empty string.

                            -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)

                            (indices, values, shape)

                            • indices: A dense matrix of int64 representing the indices of the sparse tensor.
                            • values: A vector of strings corresponding to the splited values.
                            • shape: a length-2 vector of int64 representing the shape of the sparse tensor, where the first value is N and the second value is the maximum number - of tokens in a single input entry.

                            stringToHashBucket

                            Arguments

                            :: Int64

                            num_buckets: The number of buckets.

                            -> Tensor v'1 ByteString

                            string_tensor

                            -> Tensor Build Int64

                            output: A Tensor of the same shape as the input string_tensor.

                            Converts each string in the input Tensor to its hash mod by a number of buckets.

                            The hash function is deterministic on the content of the string within the + of tokens in a single input entry.

                          stringToHashBucket Source #

                          Arguments

                          :: Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          string_tensor

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          Converts each string in the input Tensor to its hash mod by a number of buckets.

                          The hash function is deterministic on the content of the string within the process.

                          Note that the hash function may change from time to time. This functionality will be deprecated and it's recommended to use - `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.

                          stringToHashBucket'

                          Arguments

                          :: OpParams 
                          -> Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          string_tensor

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          stringToHashBucketFast

                          Arguments

                          :: Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          input: The strings to assign a hash bucket.

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          Converts each string in the input Tensor to its hash mod by a number of buckets.

                          The hash function is deterministic on the content of the string within the + `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.

                          stringToHashBucket' Source #

                          Arguments

                          :: OpParams 
                          -> Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          string_tensor

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          stringToHashBucketFast Source #

                          Arguments

                          :: Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          input: The strings to assign a hash bucket.

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          Converts each string in the input Tensor to its hash mod by a number of buckets.

                          The hash function is deterministic on the content of the string within the process and will never change. However, it is not suitable for cryptography. This function may be used when CPU time is scarce and inputs are trusted or unimportant. There is a risk of adversaries constructing inputs that all hash to the same bucket. To prevent this problem, use a strong hash function with - `tf.string_to_hash_bucket_strong`.

                          stringToHashBucketFast'

                          Arguments

                          :: OpParams 
                          -> Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          input: The strings to assign a hash bucket.

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          stringToHashBucketStrong

                          Arguments

                          :: Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          input: The strings to assign a hash bucket.

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          Converts each string in the input Tensor to its hash mod by a number of buckets.

                          The hash function is deterministic on the content of the string within the + `tf.string_to_hash_bucket_strong`.

                          stringToHashBucketFast' Source #

                          Arguments

                          :: OpParams 
                          -> Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          input: The strings to assign a hash bucket.

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          stringToHashBucketStrong Source #

                          Arguments

                          :: Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          input: The strings to assign a hash bucket.

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          Converts each string in the input Tensor to its hash mod by a number of buckets.

                          The hash function is deterministic on the content of the string within the process. The hash function is a keyed hash function, where attribute key defines the key of the hash function. key is an array of 2 elements.

                          A strong hash is important when inputs may be malicious, e.g. URLs with additional components. Adversaries could try to make their inputs hash to the same bucket for a denial-of-service attack or to skew the results. A strong - hash prevents this by making it dificult, if not infeasible, to compute inputs + hash prevents this by making it difficult, if not infeasible, to compute inputs that hash to the same bucket. This comes at a cost of roughly 4x higher compute - time than `tf.string_to_hash_bucket_fast`.

                          stringToHashBucketStrong'

                          Arguments

                          :: OpParams 
                          -> Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          input: The strings to assign a hash bucket.

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          stringToNumber

                          Arguments

                          :: OneOf `[Int32, Float]` out_type 
                          => Tensor v'1 ByteString

                          string_tensor

                          -> Tensor Build out_type

                          output: A Tensor of the same shape as the input string_tensor.

                          Converts each string in the input Tensor to the specified numeric type.

                          (Note that int32 overflow results in an error while float overflow - results in a rounded value.)

                          stringToNumber'

                          Arguments

                          :: OneOf `[Int32, Float]` out_type 
                          => OpParams 
                          -> Tensor v'1 ByteString

                          string_tensor

                          -> Tensor Build out_type

                          output: A Tensor of the same shape as the input string_tensor.

                          sub

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                          => Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          Returns x - y element-wise.

                          • NOTE*: Sub supports broadcasting. More about broadcasting - here

                          sub'

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                          => OpParams 
                          -> Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          substr

                          Arguments

                          :: OneOf `[Int32, Int64]` t 
                          => Tensor v'1 ByteString

                          input: Tensor of strings

                          -> Tensor v'2 t

                          pos: Scalar defining the position of first character in each substring

                          -> Tensor v'3 t

                          len: Scalar defining the number of characters to include in each substring

                          -> Tensor Build ByteString

                          output: Tensor of substrings

                          Return substrings from Tensor of strings.

                          For each string in the input Tensor, creates a substring starting at index + time than `tf.string_to_hash_bucket_fast`.

                          stringToHashBucketStrong' Source #

                          Arguments

                          :: OpParams 
                          -> Int64

                          num_buckets: The number of buckets.

                          -> Tensor v'1 ByteString

                          input: The strings to assign a hash bucket.

                          -> Tensor Build Int64

                          output: A Tensor of the same shape as the input string_tensor.

                          stringToNumber Source #

                          Arguments

                          :: OneOf '[Int32, Int64, Double, Float] out_type 
                          => Tensor v'1 ByteString

                          string_tensor

                          -> Tensor Build out_type

                          output: A Tensor of the same shape as the input string_tensor.

                          Converts each string in the input Tensor to the specified numeric type.

                          (Note that int32 overflow results in an error while float overflow + results in a rounded value.)

                          stringToNumber' Source #

                          Arguments

                          :: OneOf '[Int32, Int64, Double, Float] out_type 
                          => OpParams 
                          -> Tensor v'1 ByteString

                          string_tensor

                          -> Tensor Build out_type

                          output: A Tensor of the same shape as the input string_tensor.

                          sub Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                          => Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          Returns x - y element-wise.

                          • NOTE*: Sub supports broadcasting. More about broadcasting + here

                          sub' Source #

                          Arguments

                          :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                          => OpParams 
                          -> Tensor v'1 t

                          x

                          -> Tensor v'2 t

                          y

                          -> Tensor Build t

                          z

                          substr Source #

                          Arguments

                          :: OneOf '[Int32, Int64] t 
                          => Tensor v'1 ByteString

                          input: Tensor of strings

                          -> Tensor v'2 t

                          pos: Scalar defining the position of first character in each substring

                          -> Tensor v'3 t

                          len: Scalar defining the number of characters to include in each substring

                          -> Tensor Build ByteString

                          output: Tensor of substrings

                          Return substrings from Tensor of strings.

                          For each string in the input Tensor, creates a substring starting at index pos with a total length of len.

                          If len defines a substring that would extend beyond the length of the input string, then as many characters as possible are used.

                          If pos is negative or specifies a character index larger than any of the input strings, then an InvalidArgumentError is thrown.

                          pos and len must have the same shape, otherwise a ValueError is thrown on Op creation.

                          • NOTE*: Substr supports broadcasting up to two dimensions. More about broadcasting - here
                          • --

                          Examples

                          Using scalar pos and len:

                          ``` + here

                        • --
                        • Examples

                          Using scalar pos and len:

                          ```python input = [bHello, bWorld] position = 1 length = 3

                          output = [bell, borl] - ```

                          Using pos and len with same shape as input:

                          ``` + ```

                          Using pos and len with same shape as input:

                          ```python input = [[bten, beleven, btwelve], [bthirteen, bfourteen, bfifteen], [bsixteen, bseventeen, beighteen]] @@ -3407,30 +3939,38 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core input = bthirteen position = [1, 5, 7] length = [3, 2, 1]

                          output = [bhir, bee, b'n"] - ```

                          substr'

                          Arguments

                          :: OneOf `[Int32, Int64]` t 
                          => OpParams 
                          -> Tensor v'1 ByteString

                          input: Tensor of strings

                          -> Tensor v'2 t

                          pos: Scalar defining the position of first character in each substring

                          -> Tensor v'3 t

                          len: Scalar defining the number of characters to include in each substring

                          -> Tensor Build ByteString

                          output: Tensor of substrings

                          sum

                          Arguments

                          :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                          => Tensor v'1 t

                          input: The tensor to reduce.

                          -> Tensor v'2 tidx

                          reduction_indices: The dimensions to reduce.

                          -> Tensor Build t

                          output: The reduced tensor.

                          Computes the sum of elements across dimensions of a tensor.

                          Reduces input along the dimensions given in reduction_indices. Unless + ```

                          substr' Source #

                          Arguments

                          :: OneOf '[Int32, Int64] t 
                          => OpParams 
                          -> Tensor v'1 ByteString

                          input: Tensor of strings

                          -> Tensor v'2 t

                          pos: Scalar defining the position of first character in each substring

                          -> Tensor v'3 t

                          len: Scalar defining the number of characters to include in each substring

                          -> Tensor Build ByteString

                          output: Tensor of substrings

                          sum Source #

                          Arguments

                          :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                          => Tensor v'1 t

                          input: The tensor to reduce.

                          -> Tensor v'2 tidx

                          reduction_indices: The dimensions to reduce.

                          -> Tensor Build t

                          output: The reduced tensor.

                          Computes the sum of elements across dimensions of a tensor.

                          Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

                          sum'

                          Arguments

                          :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
                          => OpParams 
                          -> Tensor v'1 t

                          input: The tensor to reduce.

                          -> Tensor v'2 tidx

                          reduction_indices: The dimensions to reduce.

                          -> Tensor Build t

                          output: The reduced tensor.

                          svd

                          Arguments

                          :: OneOf `[Complex Double, Complex Float, Double, Float]` t 
                          => Tensor v'1 t

                          input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions - form matrices of size `[M, N]`. Let P be the minimum of M and N.

                          -> (Tensor Build t, Tensor Build t, Tensor Build t)

                          (s, u, v)

                          • s: Singular values. Shape is `[..., P]`.
                          • u: Left singular vectors. If full_matrices is False then shape is - `[..., M, P]`; if full_matrices is True then shape is - `[..., M, M]`. Undefined if compute_uv is False.
                          • v: Left singular vectors. If full_matrices is False then shape is - `[..., N, P]`. If full_matrices is True then shape is `[..., N, N]`. + retained with length 1.

                            sum' Source #

                            Arguments

                            :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) 
                            => OpParams 
                            -> Tensor v'1 t

                            input: The tensor to reduce.

                            -> Tensor v'2 tidx

                            reduction_indices: The dimensions to reduce.

                            -> Tensor Build t

                            output: The reduced tensor.

                            svd Source #

                            Arguments

                            :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                            => Tensor v'1 t

                            input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + form matrices of size `[M, N]`. Let P be the minimum of M and N.

                            -> (Tensor Build t, Tensor Build t, Tensor Build t)

                            (s, u, v)

                            • s: Singular values. Shape is `[..., P]`.
                            • u: Left singular vectors. If full_matrices is False then shape is + `[..., M, P]`; if full_matrices is True then shape is + `[..., M, M]`. Undefined if compute_uv is False.
                            • v: Left singular vectors. If full_matrices is False then shape is + `[..., N, P]`. If full_matrices is True then shape is `[..., N, N]`. Undefined if compute_uv is false.

                            Computes the singular value decompositions of one or more matrices.

                            Computes the SVD of each inner matrix in input such that - `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

                            ```prettyprint + `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

                            ```python # a is a tensor containing a batch of matrices. # s is a tensor of singular values for each matrix. # u is the tensor containing of left singular vectors for each matrix. # v is the tensor containing of right singular vectors for each matrix. s, u, v = svd(a) s, _, _ = svd(a, compute_uv=False) - ```

                            svd'

                            Arguments

                            :: OneOf `[Complex Double, Complex Float, Double, Float]` t 
                            => OpParams 
                            -> Tensor v'1 t

                            input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions - form matrices of size `[M, N]`. Let P be the minimum of M and N.

                            -> (Tensor Build t, Tensor Build t, Tensor Build t)

                            (s, u, v)

                            • s: Singular values. Shape is `[..., P]`.
                            • u: Left singular vectors. If full_matrices is False then shape is - `[..., M, P]`; if full_matrices is True then shape is - `[..., M, M]`. Undefined if compute_uv is False.
                            • v: Left singular vectors. If full_matrices is False then shape is - `[..., N, P]`. If full_matrices is True then shape is `[..., N, N]`. - Undefined if compute_uv is false.

                            switch

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 t

                            data: The tensor to be forwarded to the appropriate output.

                            -> Tensor v'2 Bool

                            pred: A scalar that specifies which output port will receive data.

                            -> (Tensor Build t, Tensor Build t)

                            (output_false, output_true)

                            • output_false: If pred is false, data will be forwarded to this output.
                            • output_true: If pred is true, data will be forwarded to this output.

                            Forwards `data` to the output port determined by pred.

                            If pred is true, the `data` input is forwarded to output_true. Otherwise, - the data goes to output_false.

                            See also RefSwitch and Merge.

                            switch'

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 t

                            data: The tensor to be forwarded to the appropriate output.

                            -> Tensor v'2 Bool

                            pred: A scalar that specifies which output port will receive data.

                            -> (Tensor Build t, Tensor Build t)

                            (output_false, output_true)

                            • output_false: If pred is false, data will be forwarded to this output.
                            • output_true: If pred is true, data will be forwarded to this output.

                            tFRecordReader

                            Arguments

                            :: MonadBuild m' 
                            => m' (Tensor Ref ByteString)

                            reader_handle: The handle to reference the Reader.

                            A Reader that outputs the records from a TensorFlow Records file.

                            tFRecordReader'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> m' (Tensor Ref ByteString)

                            reader_handle: The handle to reference the Reader.

                            tFRecordReaderV2

                            Arguments

                            :: MonadBuild m' 
                            => m' ResourceHandle

                            reader_handle: The handle to reference the Reader.

                            A Reader that outputs the records from a TensorFlow Records file.

                            tFRecordReaderV2'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> m' ResourceHandle

                            reader_handle: The handle to reference the Reader.

                            takeManySparseFromTensorsMap

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor v'1 Int64

                            sparse_handles: 1-D, The N serialized SparseTensor objects. - Shape: `[N]`.

                            -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

                            (sparse_indices, sparse_values, sparse_shape)

                            • sparse_indices: 2-D. The indices of the minibatch SparseTensor.
                            • sparse_values: 1-D. The values of the minibatch SparseTensor.
                            • sparse_shape: 1-D. The shape of the minibatch SparseTensor.

                            Read SparseTensors from a SparseTensorsMap and concatenate them.

                            The input sparse_handles must be an int64 matrix of shape `[N, 1]` where + ```

                            svd' Source #

                            Arguments

                            :: OneOf '[Complex Double, Complex Float, Double, Float] t 
                            => OpParams 
                            -> Tensor v'1 t

                            input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + form matrices of size `[M, N]`. Let P be the minimum of M and N.

                            -> (Tensor Build t, Tensor Build t, Tensor Build t)

                            (s, u, v)

                            • s: Singular values. Shape is `[..., P]`.
                            • u: Left singular vectors. If full_matrices is False then shape is + `[..., M, P]`; if full_matrices is True then shape is + `[..., M, M]`. Undefined if compute_uv is False.
                            • v: Left singular vectors. If full_matrices is False then shape is + `[..., N, P]`. If full_matrices is True then shape is `[..., N, N]`. + Undefined if compute_uv is false.

                            switch Source #

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 t

                            data: The tensor to be forwarded to the appropriate output.

                            -> Tensor v'2 Bool

                            pred: A scalar that specifies which output port will receive data.

                            -> (Tensor Build t, Tensor Build t)

                            (output_false, output_true)

                            • output_false: If pred is false, data will be forwarded to this output.
                            • output_true: If pred is true, data will be forwarded to this output.

                            Forwards `data` to the output port determined by pred.

                            If pred is true, the `data` input is forwarded to output_true. Otherwise, + the data goes to output_false.

                            See also RefSwitch and Merge.

                            switch' Source #

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 t

                            data: The tensor to be forwarded to the appropriate output.

                            -> Tensor v'2 Bool

                            pred: A scalar that specifies which output port will receive data.

                            -> (Tensor Build t, Tensor Build t)

                            (output_false, output_true)

                            • output_false: If pred is false, data will be forwarded to this output.
                            • output_true: If pred is true, data will be forwarded to this output.

                            tFRecordDataset Source #

                            Arguments

                            :: MonadBuild m' 
                            => Tensor v'1 ByteString

                            filenames: A scalar or vector containing the name(s) of the file(s) to be + read.

                            -> Tensor v'2 ByteString

                            compression_type: A scalar containing either (i) the empty string (no + compression), (ii) ZLIB, or (iii) GZIP.

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            Creates a dataset that emits the records from one or more TFRecord files.

                            tFRecordDataset' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            filenames: A scalar or vector containing the name(s) of the file(s) to be + read.

                            -> Tensor v'2 ByteString

                            compression_type: A scalar containing either (i) the empty string (no + compression), (ii) ZLIB, or (iii) GZIP.

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            tFRecordReader Source #

                            Arguments

                            :: MonadBuild m' 
                            => m' (Tensor Ref ByteString)

                            reader_handle: The handle to reference the Reader.

                            A Reader that outputs the records from a TensorFlow Records file.

                            tFRecordReader' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> m' (Tensor Ref ByteString)

                            reader_handle: The handle to reference the Reader.

                            tFRecordReaderV2 Source #

                            Arguments

                            :: MonadBuild m' 
                            => m' (Tensor Value ResourceHandle)

                            reader_handle: The handle to reference the Reader.

                            A Reader that outputs the records from a TensorFlow Records file.

                            tFRecordReaderV2' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> m' (Tensor Value ResourceHandle)

                            reader_handle: The handle to reference the Reader.

                            takeDataset Source #

                            Arguments

                            :: MonadBuild m' 
                            => [DataType]

                            output_types

                            -> Tensor v'1 ResourceHandle

                            input_dataset

                            -> Tensor v'2 Int64

                            count: A scalar representing the number of elements from the input_dataset + that should be taken. A value of `-1` indicates that all of input_dataset + is taken.

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            Creates a dataset that contains count elements from the input_dataset.

                            takeDataset' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> [DataType]

                            output_types

                            -> Tensor v'1 ResourceHandle

                            input_dataset

                            -> Tensor v'2 Int64

                            count: A scalar representing the number of elements from the input_dataset + that should be taken. A value of `-1` indicates that all of input_dataset + is taken.

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            takeManySparseFromTensorsMap Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor v'1 Int64

                            sparse_handles: 1-D, The N serialized SparseTensor objects. + Shape: `[N]`.

                            -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

                            (sparse_indices, sparse_values, sparse_shape)

                            • sparse_indices: 2-D. The indices of the minibatch SparseTensor.
                            • sparse_values: 1-D. The values of the minibatch SparseTensor.
                            • sparse_shape: 1-D. The shape of the minibatch SparseTensor.

                            Read SparseTensors from a SparseTensorsMap and concatenate them.

                            The input sparse_handles must be an int64 matrix of shape `[N, 1]` where N is the minibatch size and the rows correspond to the output handles of AddSparseToTensorsMap or AddManySparseToTensorsMap. The ranks of the original SparseTensor objects that went into the given input ops must all @@ -3461,26 +4001,27 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] - ```

                            takeManySparseFromTensorsMap'

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor v'1 Int64

                            sparse_handles: 1-D, The N serialized SparseTensor objects. - Shape: `[N]`.

                            -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

                            (sparse_indices, sparse_values, sparse_shape)

                            • sparse_indices: 2-D. The indices of the minibatch SparseTensor.
                            • sparse_values: 1-D. The values of the minibatch SparseTensor.
                            • sparse_shape: 1-D. The shape of the minibatch SparseTensor.

                            tan

                            Arguments

                            :: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
                            => Tensor v'1 t

                            x

                            -> Tensor Build t

                            y

                            Computes tan of x element-wise.

                            tanh

                            Arguments

                            :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                            => Tensor v'1 t

                            x

                            -> Tensor Build t

                            y

                            Computes hyperbolic tangent of x element-wise.

                            tanh'

                            Arguments

                            :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                            => OpParams 
                            -> Tensor v'1 t

                            x

                            -> Tensor Build t

                            y

                            tanhGrad

                            Arguments

                            :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                            => Tensor v'1 t

                            x

                            -> Tensor v'2 t

                            y

                            -> Tensor Build t

                            z

                            Computes the gradient for the tanh of x wrt its input.

                            Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and dy - is the corresponding input gradient.

                            tanhGrad'

                            Arguments

                            :: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
                            => OpParams 
                            -> Tensor v'1 t

                            x

                            -> Tensor v'2 t

                            y

                            -> Tensor Build t

                            z

                            temporaryVariable

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Shape

                            shape: The shape of the variable tensor.

                            -> m' (Tensor Ref dtype)

                            ref: A reference to the variable tensor.

                            Returns a tensor that may be mutated, but only persists within a single step.

                            This is an experimental op for internal use only and it is possible to use this + ```

                            takeManySparseFromTensorsMap' Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor v'1 Int64

                            sparse_handles: 1-D, The N serialized SparseTensor objects. + Shape: `[N]`.

                            -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

                            (sparse_indices, sparse_values, sparse_shape)

                            • sparse_indices: 2-D. The indices of the minibatch SparseTensor.
                            • sparse_values: 1-D. The values of the minibatch SparseTensor.
                            • sparse_shape: 1-D. The shape of the minibatch SparseTensor.

                            tan Source #

                            Arguments

                            :: OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t 
                            => Tensor v'1 t

                            x

                            -> Tensor Build t

                            y

                            Computes tan of x element-wise.

                            tanh Source #

                            Arguments

                            :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                            => Tensor v'1 t

                            x

                            -> Tensor Build t

                            y

                            Computes hyperbolic tangent of x element-wise.

                            tanhGrad Source #

                            Arguments

                            :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                            => Tensor v'1 t

                            x

                            -> Tensor v'2 t

                            y

                            -> Tensor Build t

                            z

                            Computes the gradient for the tanh of x wrt its input.

                            Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and dy + is the corresponding input gradient.

                            tanhGrad' Source #

                            Arguments

                            :: OneOf '[Complex Double, Complex Float, Word16, Double, Float] t 
                            => OpParams 
                            -> Tensor v'1 t

                            x

                            -> Tensor v'2 t

                            y

                            -> Tensor Build t

                            z

                            temporaryVariable Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Shape

                            shape: The shape of the variable tensor.

                            -> m' (Tensor Ref dtype)

                            ref: A reference to the variable tensor.

                            Returns a tensor that may be mutated, but only persists within a single step.

                            This is an experimental op for internal use only and it is possible to use this op in unsafe ways. DO NOT USE unless you fully understand the risks.

                            It is the caller's responsibility to ensure that ref is eventually passed to a matching DestroyTemporaryVariable op after all other uses have completed.

                            Outputs a ref to the tensor state so it may be read or modified.

                            E.g. var = state_ops._temporary_variable([1, 2], types.float_) var_name = var.op.name var = state_ops.assign(var, [[4.0, 5.0]]) var = state_ops.assign_add(var, [[6.0, 7.0]]) - final = state_ops._destroy_temporary_variable(var, var_name=var_name)

                            temporaryVariable'

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Shape

                            shape: The shape of the variable tensor.

                            -> m' (Tensor Ref dtype)

                            ref: A reference to the variable tensor.

                            tensorArray

                            Arguments

                            :: MonadBuild m' 
                            => DataType

                            dtype

                            -> Tensor v'1 Int32

                            size

                            -> m' (Tensor Ref ByteString)

                            handle

                            tensorArray'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> DataType

                            dtype

                            -> Tensor v'1 Int32

                            size

                            -> m' (Tensor Ref ByteString)

                            handle

                            tensorArrayCloseV2

                            Arguments

                            :: MonadBuild m' 
                            => Tensor v'1 ByteString

                            handle

                            -> m' ControlNode 

                            Deprecated. Use TensorArrayCloseV3

                            tensorArrayCloseV2'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> m' ControlNode 

                            tensorArrayCloseV3

                            Arguments

                            :: MonadBuild m' 
                            => ResourceHandle

                            handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

                            -> m' ControlNode 

                            Delete the TensorArray from its resource container. This enables

                            the user to close and release the resource in the middle of a step/run.

                            tensorArrayCloseV3'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> ResourceHandle

                            handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

                            -> m' ControlNode 

                            tensorArrayConcat

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value dtype, Tensor Value Int64)

                            (value, lengths)

                            • value
                            • lengths

                            tensorArrayConcat'

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value dtype, Tensor Value Int64)

                            (value, lengths)

                            • value
                            • lengths

                            tensorArrayConcatV2

                            Arguments

                            :: TensorType dtype 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> (Tensor Build dtype, Tensor Build Int64)

                            (value, lengths)

                            • value
                            • lengths

                            Deprecated. Use TensorArrayConcatV3

                            tensorArrayConcatV2'

                            Arguments

                            :: TensorType dtype 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> (Tensor Build dtype, Tensor Build Int64)

                            (value, lengths)

                            • value
                            • lengths

                            tensorArrayConcatV3

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value dtype, Tensor Value Int64)

                            (value, lengths)

                            Concat the elements from the TensorArray into value value.

                            Takes T elements of shapes

                            ``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) - ```

                            and concatenates them into a Tensor of shape:

                            ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```

                            All elements must have the same shape (excepting the first dimension).

                            tensorArrayConcatV3'

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value dtype, Tensor Value Int64)

                            (value, lengths)

                            tensorArrayGather Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayGather' Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayGatherV2 Source #

                            Arguments

                            :: TensorType dtype 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 Float

                            flow_in

                            -> Tensor Build dtype

                            value

                            Deprecated. Use TensorArrayGatherV3

                            tensorArrayGatherV2' Source #

                            Arguments

                            :: TensorType dtype 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 Float

                            flow_in

                            -> Tensor Build dtype

                            value

                            tensorArrayGatherV3 Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            indices: The locations in the TensorArray from which to read tensor elements.

                            -> Tensor v'3 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value dtype)

                            value: All of the elements in the TensorArray, concatenated along a new + axis (the new dimension 0).

                            Gather specific elements from the TensorArray into output value.

                            All elements selected by indices must have the same shape.

                            tensorArrayGatherV3' Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            indices: The locations in the TensorArray from which to read tensor elements.

                            -> Tensor v'3 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value dtype)

                            value: All of the elements in the TensorArray, concatenated along a new + axis (the new dimension 0).

                            tensorArrayGrad Source #

                            Arguments

                            :: MonadBuild m' 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Ref ByteString)

                            grad_handle

                            tensorArrayGrad' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Ref ByteString)

                            grad_handle

                            tensorArrayGradV2 Source #

                            Arguments

                            :: MonadBuild m' 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value ByteString)

                            grad_handle

                            Deprecated. Use TensorArrayGradV3

                            tensorArrayGradV2' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value ByteString)

                            grad_handle

                            tensorArrayGradV3 Source #

                            Arguments

                            :: MonadBuild m' 
                            => Tensor v'1 ResourceHandle

                            handle: The handle to the forward TensorArray.

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value ResourceHandle, Tensor Value Float)

                            (grad_handle, flow_out)

                            • grad_handle
                            • flow_out

                            Creates a TensorArray for storing the gradients of values in the given handle.

                            If the given TensorArray gradient already exists, returns a reference to it.

                            Locks the size of the original TensorArray by disabling its dynamic size flag.

                            • *A note about the input flow_in:**

                            The handle flow_in forces the execution of the gradient lookup to occur only after certain other operations have occurred. For example, when the forward TensorArray is dynamically sized, writes to this TensorArray may resize the object. The gradient TensorArray is statically sized based @@ -3492,19 +4033,27 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core flow to occur only after all writes have executed. That way the final size of the forward TensorArray is known when this operation is called.

                            • *A note about the source attribute:**

                            TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple - gradient nodes may accidentally flow throuth the same accumulator TensorArray. + gradient nodes may accidentally flow through the same accumulator TensorArray. This double counts and generally breaks the TensorArray gradient flow.

                            The solution is to identify which gradient call this particular TensorArray gradient is being called in. This is performed by identifying a unique string (e.g. "gradients", "gradients_1", ...) from the input gradient Tensor's name. This string is used as a suffix when creating the TensorArray gradient object here (the attribute source).

                            The attribute source is added as a suffix to the forward TensorArray's name when performing the creation / lookup, so that each separate gradient - calculation gets its own TensorArray accumulator.

                            tensorArrayGradV3'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> ResourceHandle

                            handle: The handle to the forward TensorArray.

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (ResourceHandle, Tensor Value Float)

                            (grad_handle, flow_out)

                            • grad_handle
                            • flow_out

                            tensorArrayPack

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayPack'

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayRead

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayRead'

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayReadV2

                            Arguments

                            :: TensorType dtype 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in

                            -> Tensor Build dtype

                            value

                            Deprecated. Use TensorArrayReadV3

                            tensorArrayReadV2'

                            Arguments

                            :: TensorType dtype 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in

                            -> Tensor Build dtype

                            value

                            tensorArrayReadV3

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value dtype)

                            value: The tensor that is read from the TensorArray.

                            Read an element from the TensorArray into output value.

                            tensorArrayReadV3'

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value dtype)

                            value: The tensor that is read from the TensorArray.

                            tensorArrayScatter

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayScatter'

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayScatterV2

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            Deprecated. Use TensorArrayScatterV3

                            tensorArrayScatterV2'

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            tensorArrayScatterV3

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            indices: The locations at which to write the tensor elements.

                            -> Tensor v'3 t

                            value: The concatenated tensor to write to the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            Scatter the data from the input value into specific TensorArray elements.

                            indices must be a vector, its length must match the first dim of value.

                            tensorArrayScatterV3'

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            indices: The locations at which to write the tensor elements.

                            -> Tensor v'3 t

                            value: The concatenated tensor to write to the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            tensorArraySize

                            Arguments

                            :: MonadBuild m' 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value Int32)

                            size

                            tensorArraySize'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value Int32)

                            size

                            tensorArraySizeV2

                            Arguments

                            :: Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> Tensor Build Int32

                            size

                            Deprecated. Use TensorArraySizeV3

                            tensorArraySizeV2'

                            Arguments

                            :: OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> Tensor Build Int32

                            size

                            tensorArraySizeV3

                            Arguments

                            :: MonadBuild m' 
                            => ResourceHandle

                            handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Int32)

                            size: The current size of the TensorArray.

                            Get the current size of the TensorArray.

                            tensorArraySizeV3'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> ResourceHandle

                            handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Int32)

                            size: The current size of the TensorArray.

                            tensorArraySplit

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Int64

                            lengths

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArraySplit'

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Int64

                            lengths

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArraySplitV2

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Int64

                            lengths

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            Deprecated. Use TensorArraySplitV3

                            tensorArraySplitV2'

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Int64

                            lengths

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            tensorArraySplitV3

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 t

                            value: The concatenated tensor to write to the TensorArray.

                            -> Tensor v'3 Int64

                            lengths: The vector of lengths, how to split the rows of value into the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            Split the data from the input value into TensorArray elements.

                            Assuming that lengths takes on values

                            ```(n0, n1, ..., n(T-1))```

                            and that value has shape

                            ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,

                            this splits values into a TensorArray with T tensors.

                            TensorArray index t will be the subtensor of values with starting position

                            ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```

                            and having size

                            ```nt x d0 x d1 x ...```

                            tensorArraySplitV3'

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 t

                            value: The concatenated tensor to write to the TensorArray.

                            -> Tensor v'3 Int64

                            lengths: The vector of lengths, how to split the rows of value into the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            tensorArrayUnpack

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayUnpack'

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayV2

                            Arguments

                            :: MonadBuild m' 
                            => DataType

                            dtype

                            -> Tensor v'1 Int32

                            size

                            -> m' (Tensor Value ByteString)

                            handle

                            Deprecated. Use TensorArrayV3

                            tensorArrayV2'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> DataType

                            dtype

                            -> Tensor v'1 Int32

                            size

                            -> m' (Tensor Value ByteString)

                            handle

                            tensorArrayV3

                            Arguments

                            :: MonadBuild m' 
                            => DataType

                            dtype: The type of the elements on the tensor_array.

                            -> Tensor v'1 Int32

                            size: The size of the array.

                            -> m' (ResourceHandle, Tensor Value Float)

                            (handle, flow)

                            • handle: The handle to the TensorArray.
                            • flow: A scalar used to control gradient flow.

                            An array of Tensors of given size, with data written via Write and read

                            via Read or Pack.

                            tensorArrayV3'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> DataType

                            dtype: The type of the elements on the tensor_array.

                            -> Tensor v'1 Int32

                            size: The size of the array.

                            -> m' (ResourceHandle, Tensor Value Float)

                            (handle, flow)

                            • handle: The handle to the TensorArray.
                            • flow: A scalar used to control gradient flow.

                            tensorArrayWrite

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayWrite'

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayWriteV2

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            Deprecated. Use TensorArrayWriteV3

                            tensorArrayWriteV2'

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            tensorArrayWriteV3

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            index: The position to write to inside the TensorArray.

                            -> Tensor v'3 t

                            value: The tensor to write to the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            Push an element onto the tensor_array.

                            tensorArrayWriteV3'

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            index: The position to write to inside the TensorArray.

                            -> Tensor v'3 t

                            value: The tensor to write to the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            tensorSummary

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 t

                            tensor: A tensor to serialize.

                            -> Tensor Build ByteString

                            summary

                            Outputs a Summary protocol buffer with a tensor.

                            tensorSummary'

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 t

                            tensor: A tensor to serialize.

                            -> Tensor Build ByteString

                            summary

                            textLineReader

                            Arguments

                            :: MonadBuild m' 
                            => m' (Tensor Ref ByteString)

                            reader_handle: The handle to reference the Reader.

                            A Reader that outputs the lines of a file delimited by '\n'.

                            textLineReader'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> m' (Tensor Ref ByteString)

                            reader_handle: The handle to reference the Reader.

                            textLineReaderV2

                            Arguments

                            :: MonadBuild m' 
                            => m' ResourceHandle

                            reader_handle: The handle to reference the Reader.

                            A Reader that outputs the lines of a file delimited by '\n'.

                            textLineReaderV2'

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> m' ResourceHandle

                            reader_handle: The handle to reference the Reader.

                            threadUnsafeUnigramCandidateSampler

                            Arguments

                            :: Int64

                            num_sampled: Number of candidates to randomly sample per batch.

                            -> Int64

                            num_true: Number of true labels per context.

                            -> Int64

                            range_max: The sampler will sample integers from the interval [0, range_max).

                            -> Bool

                            unique: If unique is true, we sample with rejection, so that all sampled + calculation gets its own TensorArray accumulator.

                            tensorArrayGradV3' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor v'1 ResourceHandle

                            handle: The handle to the forward TensorArray.

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value ResourceHandle, Tensor Value Float)

                            (grad_handle, flow_out)

                            • grad_handle
                            • flow_out

                            tensorArrayPack Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayPack' Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayRead Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayRead' Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value dtype)

                            value

                            tensorArrayReadV2 Source #

                            Arguments

                            :: TensorType dtype 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in

                            -> Tensor Build dtype

                            value

                            Deprecated. Use TensorArrayReadV3

                            tensorArrayReadV2' Source #

                            Arguments

                            :: TensorType dtype 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in

                            -> Tensor Build dtype

                            value

                            tensorArrayReadV3 Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value dtype)

                            value: The tensor that is read from the TensorArray.

                            Read an element from the TensorArray into output value.

                            tensorArrayReadV3' Source #

                            Arguments

                            :: (MonadBuild m', TensorType dtype) 
                            => OpParams 
                            -> Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value dtype)

                            value: The tensor that is read from the TensorArray.

                            tensorArrayScatter Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayScatter' Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayScatterV2 Source #

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            Deprecated. Use TensorArrayScatterV3

                            tensorArrayScatterV2' Source #

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            indices

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            tensorArrayScatterV3 Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            indices: The locations at which to write the tensor elements.

                            -> Tensor v'3 t

                            value: The concatenated tensor to write to the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            Scatter the data from the input value into specific TensorArray elements.

                            indices must be a vector, its length must match the first dim of value.

                            tensorArrayScatterV3' Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            indices: The locations at which to write the tensor elements.

                            -> Tensor v'3 t

                            value: The concatenated tensor to write to the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            tensorArraySize Source #

                            Arguments

                            :: MonadBuild m' 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value Int32)

                            size

                            tensorArraySize' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> m' (Tensor Value Int32)

                            size

                            tensorArraySizeV2 Source #

                            Arguments

                            :: Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> Tensor Build Int32

                            size

                            Deprecated. Use TensorArraySizeV3

                            tensorArraySizeV2' Source #

                            Arguments

                            :: OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Float

                            flow_in

                            -> Tensor Build Int32

                            size

                            tensorArraySizeV3 Source #

                            Arguments

                            :: MonadBuild m' 
                            => Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Int32)

                            size: The current size of the TensorArray.

                            Get the current size of the TensorArray.

                            tensorArraySizeV3' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

                            -> Tensor v'2 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Int32)

                            size: The current size of the TensorArray.

                            tensorArraySplit Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Int64

                            lengths

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArraySplit' Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Int64

                            lengths

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArraySplitV2 Source #

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Int64

                            lengths

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            Deprecated. Use TensorArraySplitV3

                            tensorArraySplitV2' Source #

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Int64

                            lengths

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            tensorArraySplitV3 Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 t

                            value: The concatenated tensor to write to the TensorArray.

                            -> Tensor v'3 Int64

                            lengths: The vector of lengths, how to split the rows of value into the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            Split the data from the input value into TensorArray elements.

                            Assuming that lengths takes on values

                            ```(n0, n1, ..., n(T-1))```

                            and that value has shape

                            ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,

                            this splits values into a TensorArray with T tensors.

                            TensorArray index t will be the subtensor of values with starting position

                            ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```

                            and having size

                            ```nt x d0 x d1 x ...```

                            tensorArraySplitV3' Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 t

                            value: The concatenated tensor to write to the TensorArray.

                            -> Tensor v'3 Int64

                            lengths: The vector of lengths, how to split the rows of value into the + TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            tensorArrayUnpack Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayUnpack' Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 t

                            value

                            -> Tensor v'3 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayV2 Source #

                            Arguments

                            :: MonadBuild m' 
                            => DataType

                            dtype

                            -> Tensor v'1 Int32

                            size

                            -> m' (Tensor Value ByteString)

                            handle

                            Deprecated. Use TensorArrayV3

                            tensorArrayV2' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> DataType

                            dtype

                            -> Tensor v'1 Int32

                            size

                            -> m' (Tensor Value ByteString)

                            handle

                            tensorArrayV3 Source #

                            Arguments

                            :: MonadBuild m' 
                            => DataType

                            dtype: The type of the elements on the tensor_array.

                            -> Tensor v'1 Int32

                            size: The size of the array.

                            -> m' (Tensor Value ResourceHandle, Tensor Value Float)

                            (handle, flow)

                            • handle: The handle to the TensorArray.
                            • flow: A scalar used to control gradient flow.

                            An array of Tensors of given size.

                            Write data via Write and read via Read or Pack.

                            tensorArrayV3' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> DataType

                            dtype: The type of the elements on the tensor_array.

                            -> Tensor v'1 Int32

                            size: The size of the array.

                            -> m' (Tensor Value ResourceHandle, Tensor Value Float)

                            (handle, flow)

                            • handle: The handle to the TensorArray.
                            • flow: A scalar used to control gradient flow.

                            tensorArrayWrite Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayWrite' Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor Ref ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> m' (Tensor Value Float)

                            flow_out

                            tensorArrayWriteV2 Source #

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            Deprecated. Use TensorArrayGradV3

                            tensorArrayWriteV2' Source #

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            handle

                            -> Tensor v'2 Int32

                            index

                            -> Tensor v'3 t

                            value

                            -> Tensor v'4 Float

                            flow_in

                            -> Tensor Build Float

                            flow_out

                            tensorArrayWriteV3 Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            index: The position to write to inside the TensorArray.

                            -> Tensor v'3 t

                            value: The tensor to write to the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            Push an element onto the tensor_array.

                            tensorArrayWriteV3' Source #

                            Arguments

                            :: (MonadBuild m', TensorType t) 
                            => OpParams 
                            -> Tensor v'1 ResourceHandle

                            handle: The handle to a TensorArray.

                            -> Tensor v'2 Int32

                            index: The position to write to inside the TensorArray.

                            -> Tensor v'3 t

                            value: The tensor to write to the TensorArray.

                            -> Tensor v'4 Float

                            flow_in: A float scalar that enforces proper chaining of operations.

                            -> m' (Tensor Value Float)

                            flow_out: A float scalar that enforces proper chaining of operations.

                            tensorDataset Source #

                            Arguments

                            :: (MonadBuild m', TensorTypes toutput_types) 
                            => TensorList v'1 toutput_types

                            components

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            Creates a dataset that emits components as a tuple of tensors once.

                            tensorDataset' Source #

                            Arguments

                            :: (MonadBuild m', TensorTypes toutput_types) 
                            => OpParams 
                            -> TensorList v'1 toutput_types

                            components

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            tensorSliceDataset Source #

                            Arguments

                            :: (MonadBuild m', TensorTypes toutput_types) 
                            => TensorList v'1 toutput_types

                            components

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            Creates a dataset that emits each dim-0 slice of components once.

                            tensorSliceDataset' Source #

                            Arguments

                            :: (MonadBuild m', TensorTypes toutput_types) 
                            => OpParams 
                            -> TensorList v'1 toutput_types

                            components

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            tensorSummary Source #

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 t

                            tensor: A tensor to serialize.

                            -> Tensor Build ByteString

                            summary

                            Outputs a Summary protocol buffer with a tensor.

                            This op is being phased out in favor of TensorSummaryV2, which lets callers pass + a tag as well as a serialized SummaryMetadata proto string that contains + plugin-specific data. We will keep this op to maintain backwards compatibility.

                            tensorSummary' Source #

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 t

                            tensor: A tensor to serialize.

                            -> Tensor Build ByteString

                            summary

                            tensorSummaryV2 Source #

                            Arguments

                            :: TensorType t 
                            => Tensor v'1 ByteString

                            tag: A string attached to this summary. Used for organization in TensorBoard.

                            -> Tensor v'2 t

                            tensor: A tensor to serialize.

                            -> Tensor v'3 ByteString

                            serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin + data.

                            -> Tensor Build ByteString

                            summary

                            Outputs a Summary protocol buffer with a tensor and per-plugin data.

                            tensorSummaryV2' Source #

                            Arguments

                            :: TensorType t 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            tag: A string attached to this summary. Used for organization in TensorBoard.

                            -> Tensor v'2 t

                            tensor: A tensor to serialize.

                            -> Tensor v'3 ByteString

                            serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin + data.

                            -> Tensor Build ByteString

                            summary

                            textLineDataset Source #

                            Arguments

                            :: MonadBuild m' 
                            => Tensor v'1 ByteString

                            filenames: A scalar or a vector containing the name(s) of the file(s) to be + read.

                            -> Tensor v'2 ByteString

                            compression_type: A scalar containing either (i) the empty string (no + compression), (ii) ZLIB, or (iii) GZIP.

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            Creates a dataset that emits the lines of one or more text files.

                            textLineDataset' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> Tensor v'1 ByteString

                            filenames: A scalar or a vector containing the name(s) of the file(s) to be + read.

                            -> Tensor v'2 ByteString

                            compression_type: A scalar containing either (i) the empty string (no + compression), (ii) ZLIB, or (iii) GZIP.

                            -> m' (Tensor Value ResourceHandle)

                            handle

                            textLineReader Source #

                            Arguments

                            :: MonadBuild m' 
                            => m' (Tensor Ref ByteString)

                            reader_handle: The handle to reference the Reader.

                            A Reader that outputs the lines of a file delimited by '\n'.

                            textLineReader' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> m' (Tensor Ref ByteString)

                            reader_handle: The handle to reference the Reader.

                            textLineReaderV2 Source #

                            Arguments

                            :: MonadBuild m' 
                            => m' (Tensor Value ResourceHandle)

                            reader_handle: The handle to reference the Reader.

                            A Reader that outputs the lines of a file delimited by '\n'.

                            textLineReaderV2' Source #

                            Arguments

                            :: MonadBuild m' 
                            => OpParams 
                            -> m' (Tensor Value ResourceHandle)

                            reader_handle: The handle to reference the Reader.

                            threadUnsafeUnigramCandidateSampler Source #

                            Arguments

                            :: MonadBuild m' 
                            => Int64

                            num_sampled: Number of candidates to randomly sample.

                            -> Int64

                            num_true: Number of true labels per context.

                            -> Int64

                            range_max: The sampler will sample integers from the interval [0, range_max).

                            -> Bool

                            unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

                            -> Tensor v'1 Int64

                            true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

                            -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

                            (sampled_candidates, true_expected_count, sampled_expected_count)

                            • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

                            -> Tensor v'1 Int64

                            true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

                            -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

                            (sampled_candidates, true_expected_count, sampled_expected_count)

                            • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
                            • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
                            • sampled_expected_count: A vector of length num_sampled, for each sampled @@ -3514,46 +4063,46 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core go/candidate-sampling.

                              For each batch, this op picks a single set of sampled candidate labels.

                              The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the - true labels.

                              threadUnsafeUnigramCandidateSampler'

                              Arguments

                              :: OpParams 
                              -> Int64

                              num_sampled: Number of candidates to randomly sample per batch.

                              -> Int64

                              num_true: Number of true labels per context.

                              -> Int64

                              range_max: The sampler will sample integers from the interval [0, range_max).

                              -> Bool

                              unique: If unique is true, we sample with rejection, so that all sampled + true labels.

                              threadUnsafeUnigramCandidateSampler' Source #

                              Arguments

                              :: MonadBuild m' 
                              => OpParams 
                              -> Int64

                              num_sampled: Number of candidates to randomly sample.

                              -> Int64

                              num_true: Number of true labels per context.

                              -> Int64

                              range_max: The sampler will sample integers from the interval [0, range_max).

                              -> Bool

                              unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

                              -> Tensor v'1 Int64

                              true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

                              -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

                              (sampled_candidates, true_expected_count, sampled_expected_count)

                              • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

                              -> Tensor v'1 Int64

                              true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

                              -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

                              (sampled_candidates, true_expected_count, sampled_expected_count)

                              • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
                              • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
                              • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a - probability.

                              tile

                              Arguments

                              :: (TensorType t, OneOf `[Int32, Int64]` tmultiples) 
                              => Tensor v'1 t

                              input: 1-D or higher.

                              -> Tensor v'2 tmultiples

                              multiples: 1-D. Length must be the same as the number of dimensions in input

                              -> Tensor Build t

                              output

                              Constructs a tensor by tiling a given tensor.

                              This operation creates a new tensor by replicating input multiples times. + probability.

                              tile Source #

                              Arguments

                              :: (TensorType t, OneOf '[Int32, Int64] tmultiples) 
                              => Tensor v'1 t

                              input: 1-D or higher.

                              -> Tensor v'2 tmultiples

                              multiples: 1-D. Length must be the same as the number of dimensions in input

                              -> Tensor Build t

                              output

                              Constructs a tensor by tiling a given tensor.

                              This operation creates a new tensor by replicating input multiples times. The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, and the values of input are replicated `multiples[i]` times along the ith dimension. For example, tiling `[a b c d]` by `[2]` produces - `[a b c d a b c d]`.

                              tile'

                              Arguments

                              :: (TensorType t, OneOf `[Int32, Int64]` tmultiples) 
                              => OpParams 
                              -> Tensor v'1 t

                              input: 1-D or higher.

                              -> Tensor v'2 tmultiples

                              multiples: 1-D. Length must be the same as the number of dimensions in input

                              -> Tensor Build t

                              output

                              tileGrad

                              Arguments

                              :: TensorType t 
                              => Tensor v'1 t

                              input

                              -> Tensor v'2 Int32

                              multiples

                              -> Tensor Build t

                              output

                              Returns the gradient of Tile.

                              Since Tile takes an input and repeats the input multiples times + `[a b c d a b c d]`.

                              tile' Source #

                              Arguments

                              :: (TensorType t, OneOf '[Int32, Int64] tmultiples) 
                              => OpParams 
                              -> Tensor v'1 t

                              input: 1-D or higher.

                              -> Tensor v'2 tmultiples

                              multiples: 1-D. Length must be the same as the number of dimensions in input

                              -> Tensor Build t

                              output

                              tileGrad Source #

                              Arguments

                              :: TensorType t 
                              => Tensor v'1 t

                              input

                              -> Tensor v'2 Int32

                              multiples

                              -> Tensor Build t

                              output

                              Returns the gradient of Tile.

                              Since Tile takes an input and repeats the input multiples times along each dimension, TileGrad takes in multiples and aggregates - each repeated tile of input into output.

                              tileGrad'

                              Arguments

                              :: TensorType t 
                              => OpParams 
                              -> Tensor v'1 t

                              input

                              -> Tensor v'2 Int32

                              multiples

                              -> Tensor Build t

                              output

                              topK

                              Arguments

                              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                              => Int64

                              k: Number of top elements to look for along the last dimension (along each - row for matrices).

                              -> Tensor v'1 t

                              input: 1-D or higher with last dimension at least k.

                              -> (Tensor Build t, Tensor Build Int32)

                              (values, indices)

                              • values: The k largest elements along each last dimensional slice.
                              • indices: The indices of values within the last dimension of input.

                              Finds values and indices of the k largest elements for the last dimension.

                              If the input is a vector (rank-1), finds the k largest entries in the vector + each repeated tile of input into output.

                              tileGrad' Source #

                              Arguments

                              :: TensorType t 
                              => OpParams 
                              -> Tensor v'1 t

                              input

                              -> Tensor v'2 Int32

                              multiples

                              -> Tensor Build t

                              output

                              topK Source #

                              Arguments

                              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                              => Int64

                              k: Number of top elements to look for along the last dimension (along each + row for matrices).

                              -> Tensor v'1 t

                              input: 1-D or higher with last dimension at least k.

                              -> (Tensor Build t, Tensor Build Int32)

                              (values, indices)

                              • values: The k largest elements along each last dimensional slice.
                              • indices: The indices of values within the last dimension of input.

                              Finds values and indices of the k largest elements for the last dimension.

                              If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

                              For matrices (resp. higher rank input), computes the top k entries in each - row (resp. vector along the last dimension). Thus,

                              values.shape = indices.shape = input.shape[:-1] + [k]

                              If two elements are equal, the lower-index element appears first.

                              If k varies dynamically, use TopKV2 below.

                              topK'

                              Arguments

                              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                              => OpParams 
                              -> Int64

                              k: Number of top elements to look for along the last dimension (along each - row for matrices).

                              -> Tensor v'1 t

                              input: 1-D or higher with last dimension at least k.

                              -> (Tensor Build t, Tensor Build Int32)

                              (values, indices)

                              • values: The k largest elements along each last dimensional slice.
                              • indices: The indices of values within the last dimension of input.

                              topKV2

                              Arguments

                              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                              => Tensor v'1 t

                              input: 1-D or higher with last dimension at least k.

                              -> Tensor v'2 Int32

                              k: 0-D. Number of top elements to look for along the last dimension (along each - row for matrices).

                              -> (Tensor Build t, Tensor Build Int32)

                              (values, indices)

                              • values: The k largest elements along each last dimensional slice.
                              • indices: The indices of values within the last dimension of input.

                              Finds values and indices of the k largest elements for the last dimension.

                              If the input is a vector (rank-1), finds the k largest entries in the vector + row (resp. vector along the last dimension). Thus,

                              values.shape = indices.shape = input.shape[:-1] + [k]

                              If two elements are equal, the lower-index element appears first.

                              If k varies dynamically, use TopKV2 below.

                              topK' Source #

                              Arguments

                              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                              => OpParams 
                              -> Int64

                              k: Number of top elements to look for along the last dimension (along each + row for matrices).

                              -> Tensor v'1 t

                              input: 1-D or higher with last dimension at least k.

                              -> (Tensor Build t, Tensor Build Int32)

                              (values, indices)

                              • values: The k largest elements along each last dimensional slice.
                              • indices: The indices of values within the last dimension of input.

                              topKV2 Source #

                              Arguments

                              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                              => Tensor v'1 t

                              input: 1-D or higher with last dimension at least k.

                              -> Tensor v'2 Int32

                              k: 0-D. Number of top elements to look for along the last dimension (along each + row for matrices).

                              -> (Tensor Build t, Tensor Build Int32)

                              (values, indices)

                              • values: The k largest elements along each last dimensional slice.
                              • indices: The indices of values within the last dimension of input.

                              Finds values and indices of the k largest elements for the last dimension.

                              If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

                              For matrices (resp. higher rank input), computes the top k entries in each - row (resp. vector along the last dimension). Thus,

                              values.shape = indices.shape = input.shape[:-1] + [k]

                              If two elements are equal, the lower-index element appears first.

                              topKV2'

                              Arguments

                              :: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                              => OpParams 
                              -> Tensor v'1 t

                              input: 1-D or higher with last dimension at least k.

                              -> Tensor v'2 Int32

                              k: 0-D. Number of top elements to look for along the last dimension (along each - row for matrices).

                              -> (Tensor Build t, Tensor Build Int32)

                              (values, indices)

                              • values: The k largest elements along each last dimensional slice.
                              • indices: The indices of values within the last dimension of input.

                              transpose

                              Arguments

                              :: (TensorType t, OneOf `[Int32, Int64]` tperm) 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 tperm

                              perm

                              -> Tensor Build t

                              y

                              Shuffle dimensions of x according to a permutation.

                              The output y has the same rank as x. The shapes of x and y satisfy: - `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`

                              transpose'

                              Arguments

                              :: (TensorType t, OneOf `[Int32, Int64]` tperm) 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 tperm

                              perm

                              -> Tensor Build t

                              y

                              truncateDiv

                              Arguments

                              :: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              Returns x / y element-wise for integer types.

                              Truncation designates that negative numbers will round fractional quantities + row (resp. vector along the last dimension). Thus,

                              values.shape = indices.shape = input.shape[:-1] + [k]

                              If two elements are equal, the lower-index element appears first.

                              topKV2' Source #

                              Arguments

                              :: OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                              => OpParams 
                              -> Tensor v'1 t

                              input: 1-D or higher with last dimension at least k.

                              -> Tensor v'2 Int32

                              k: 0-D. Number of top elements to look for along the last dimension (along each + row for matrices).

                              -> (Tensor Build t, Tensor Build Int32)

                              (values, indices)

                              • values: The k largest elements along each last dimensional slice.
                              • indices: The indices of values within the last dimension of input.

                              transpose Source #

                              Arguments

                              :: (TensorType t, OneOf '[Int32, Int64] tperm) 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 tperm

                              perm

                              -> Tensor Build t

                              y

                              Shuffle dimensions of x according to a permutation.

                              The output y has the same rank as x. The shapes of x and y satisfy: + `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`

                              transpose' Source #

                              Arguments

                              :: (TensorType t, OneOf '[Int32, Int64] tperm) 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 tperm

                              perm

                              -> Tensor Build t

                              y

                              truncateDiv Source #

                              Arguments

                              :: OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              Returns x / y element-wise for integer types.

                              Truncation designates that negative numbers will round fractional quantities toward zero. I.e. -7 / 5 = 1. This matches C semantics but it is different than Python semantics. See FloorDiv for a division function that matches Python Semantics.

                              • NOTE*: TruncateDiv supports broadcasting. More about broadcasting - here

                              truncateMod

                              Arguments

                              :: OneOf `[Int32, Int64, Double, Float]` t 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              Returns element-wise remainder of division. This emulates C semantics where

                              true, this follows C semantics in that the result here is consistent - with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.

                              • NOTE*: Mod supports broadcasting. More about broadcasting - here

                              truncateMod'

                              Arguments

                              :: OneOf `[Int32, Int64, Double, Float]` t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              truncatedNormal

                              Arguments

                              :: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
                              => Tensor v'1 t

                              shape: The shape of the output tensor.

                              -> m' (Tensor Value dtype)

                              output: A tensor of the specified shape filled with random truncated normal + here

                              truncateMod Source #

                              Arguments

                              :: OneOf '[Int32, Int64, Double, Float] t 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              Returns element-wise remainder of division. This emulates C semantics in that

                              the result here is consistent with a truncating divide. E.g. `truncate(x / y) * + y + truncate_mod(x, y) = x`.

                              • NOTE*: TruncateMod supports broadcasting. More about broadcasting + here

                              truncateMod' Source #

                              Arguments

                              :: OneOf '[Int32, Int64, Double, Float] t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              truncatedNormal Source #

                              Arguments

                              :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                              => Tensor v'1 t

                              shape: The shape of the output tensor.

                              -> m' (Tensor Value dtype)

                              output: A tensor of the specified shape filled with random truncated normal values.

                              Outputs random values from a truncated normal distribution.

                              The generated values follow a normal distribution with mean 0 and standard deviation 1, except that values whose magnitude is more than 2 standard - deviations from the mean are dropped and re-picked.

                              truncatedNormal'

                              Arguments

                              :: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
                              => OpParams 
                              -> Tensor v'1 t

                              shape: The shape of the output tensor.

                              -> m' (Tensor Value dtype)

                              output: A tensor of the specified shape filled with random truncated normal - values.

                              uniformCandidateSampler

                              Arguments

                              :: Int64

                              num_sampled: Number of candidates to randomly sample per batch.

                              -> Int64

                              num_true: Number of true labels per context.

                              -> Int64

                              range_max: The sampler will sample integers from the interval [0, range_max).

                              -> Bool

                              unique: If unique is true, we sample with rejection, so that all sampled + deviations from the mean are dropped and re-picked.

                              truncatedNormal' Source #

                              Arguments

                              :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) 
                              => OpParams 
                              -> Tensor v'1 t

                              shape: The shape of the output tensor.

                              -> m' (Tensor Value dtype)

                              output: A tensor of the specified shape filled with random truncated normal + values.

                              uniformCandidateSampler Source #

                              Arguments

                              :: MonadBuild m' 
                              => Int64

                              num_sampled: Number of candidates to randomly sample.

                              -> Int64

                              num_true: Number of true labels per context.

                              -> Int64

                              range_max: The sampler will sample integers from the interval [0, range_max).

                              -> Bool

                              unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

                              -> Tensor v'1 Int64

                              true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

                              -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

                              (sampled_candidates, true_expected_count, sampled_expected_count)

                              • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

                              -> Tensor v'1 Int64

                              true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

                              -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

                              (sampled_candidates, true_expected_count, sampled_expected_count)

                              • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
                              • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
                              • sampled_expected_count: A vector of length num_sampled, for each sampled @@ -3563,63 +4112,73 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core go/candidate-sampling.

                                For each batch, this op picks a single set of sampled candidate labels.

                                The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the - true labels.

                                uniformCandidateSampler'

                                Arguments

                                :: OpParams 
                                -> Int64

                                num_sampled: Number of candidates to randomly sample per batch.

                                -> Int64

                                num_true: Number of true labels per context.

                                -> Int64

                                range_max: The sampler will sample integers from the interval [0, range_max).

                                -> Bool

                                unique: If unique is true, we sample with rejection, so that all sampled + true labels.

                                uniformCandidateSampler' Source #

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> Int64

                                num_sampled: Number of candidates to randomly sample.

                                -> Int64

                                num_true: Number of true labels per context.

                                -> Int64

                                range_max: The sampler will sample integers from the interval [0, range_max).

                                -> Bool

                                unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

                                -> Tensor v'1 Int64

                                true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

                                -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

                                (sampled_candidates, true_expected_count, sampled_expected_count)

                                • sampled_candidates: A vector of length num_sampled, in which each element is + estimate the post-rejection sampling probabilities.

                                -> Tensor v'1 Int64

                                true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

                                -> m' (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

                                (sampled_candidates, true_expected_count, sampled_expected_count)

                                • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
                                • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
                                • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a - probability.

                                unique

                                Arguments

                                :: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
                                => Tensor v'1 t

                                x: 1-D.

                                -> (Tensor Build t, Tensor Build out_idx)

                                (y, idx)

                                • y: 1-D.
                                • idx: 1-D.

                                Finds unique elements in a 1-D tensor.

                                This operation returns a tensor y containing all of the unique elements of x + probability.

                                unique Source #

                                Arguments

                                :: (TensorType t, OneOf '[Int32, Int64] out_idx) 
                                => Tensor v'1 t

                                x: 1-D.

                                -> (Tensor Build t, Tensor Build out_idx)

                                (y, idx)

                                • y: 1-D.
                                • idx: 1-D.

                                Finds unique elements in a 1-D tensor.

                                This operation returns a tensor y containing all of the unique elements of x sorted in the same order that they occur in x. This operation also returns a tensor idx the same size as x that contains the index of each value of x - in the unique output y. In other words:

                                `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

                                For example:

                                ```prettyprint + in the unique output y. In other words:

                                `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

                                For example:

                                ``` # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, idx = unique(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - ```

                                unique'

                                Arguments

                                :: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
                                => OpParams 
                                -> Tensor v'1 t

                                x: 1-D.

                                -> (Tensor Build t, Tensor Build out_idx)

                                (y, idx)

                                • y: 1-D.
                                • idx: 1-D.

                                uniqueWithCounts

                                Arguments

                                :: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
                                => Tensor v'1 t

                                x: 1-D.

                                -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)

                                (y, idx, count)

                                • y: 1-D.
                                • idx: 1-D.
                                • count: 1-D.

                                Finds unique elements in a 1-D tensor.

                                This operation returns a tensor y containing all of the unique elements of x + ```

                                unique' Source #

                                Arguments

                                :: (TensorType t, OneOf '[Int32, Int64] out_idx) 
                                => OpParams 
                                -> Tensor v'1 t

                                x: 1-D.

                                -> (Tensor Build t, Tensor Build out_idx)

                                (y, idx)

                                • y: 1-D.
                                • idx: 1-D.

                                uniqueWithCounts Source #

                                Arguments

                                :: (TensorType t, OneOf '[Int32, Int64] out_idx) 
                                => Tensor v'1 t

                                x: 1-D.

                                -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)

                                (y, idx, count)

                                • y: 1-D.
                                • idx: 1-D.
                                • count: 1-D.

                                Finds unique elements in a 1-D tensor.

                                This operation returns a tensor y containing all of the unique elements of x sorted in the same order that they occur in x. This operation also returns a tensor idx the same size as x that contains the index of each value of x in the unique output y. Finally, it returns a third tensor count that - contains the count of each element of y in x. In other words:

                                `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

                                For example:

                                ```prettyprint + contains the count of each element of y in x. In other words:

                                `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

                                For example:

                                ``` # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, idx, count = unique_with_counts(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==> [2, 1, 3, 1, 2] - ```

                                uniqueWithCounts'

                                Arguments

                                :: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
                                => OpParams 
                                -> Tensor v'1 t

                                x: 1-D.

                                -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)

                                (y, idx, count)

                                • y: 1-D.
                                • idx: 1-D.
                                • count: 1-D.

                                unpack

                                Arguments

                                :: TensorType t 
                                => Int64

                                num

                                -> Tensor v'1 t

                                value: 1-D or higher, with axis dimension size equal to num.

                                -> [Tensor Build t]

                                output: The list of tensors unpacked from value.

                                Unpacks a given dimension of a rank-R tensor into num rank-`(R-1)` tensors.

                                Unpacks num tensors from value by chipping it along the axis dimension. + ```

                                uniqueWithCounts' Source #

                                Arguments

                                :: (TensorType t, OneOf '[Int32, Int64] out_idx) 
                                => OpParams 
                                -> Tensor v'1 t

                                x: 1-D.

                                -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)

                                (y, idx, count)

                                • y: 1-D.
                                • idx: 1-D.
                                • count: 1-D.

                                unpack Source #

                                Arguments

                                :: TensorType t 
                                => Int64

                                num

                                -> Tensor v'1 t

                                value: 1-D or higher, with axis dimension size equal to num.

                                -> [Tensor Build t]

                                output: The list of tensors unpacked from value.

                                Unpacks a given dimension of a rank-R tensor into num rank-`(R-1)` tensors.

                                Unpacks num tensors from value by chipping it along the axis dimension. For example, given a tensor of shape `(A, B, C, D)`;

                                If `axis == 0` then the i'th tensor in output is the slice `value[i, :, :, :]` and each tensor in output will have shape `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike split).

                                If `axis == 1` then the i'th tensor in output is the slice `value[:, i, :, :]` and each tensor in output will have shape `(A, C, D)`. - Etc.

                                This is the opposite of pack.

                                unpack'

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Int64

                                num

                                -> Tensor v'1 t

                                value: 1-D or higher, with axis dimension size equal to num.

                                -> [Tensor Build t]

                                output: The list of tensors unpacked from value.

                                unsortedSegmentSum

                                Arguments

                                :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                                => Tensor v'1 t

                                data

                                -> Tensor v'2 tindices

                                segment_ids: A tensor whose shape is a prefix of `data.shape`.

                                -> Tensor v'3 Int32

                                num_segments

                                -> Tensor Build t

                                output: Has same shape as data, except for the first `segment_ids.rank` + Etc.

                                This is the opposite of pack.

                                unpack' Source #

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Int64

                                num

                                -> Tensor v'1 t

                                value: 1-D or higher, with axis dimension size equal to num.

                                -> [Tensor Build t]

                                output: The list of tensors unpacked from value.

                                unsortedSegmentMax Source #

                                Arguments

                                :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                                => Tensor v'1 t

                                data

                                -> Tensor v'2 tindices

                                segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension.

                                -> Tensor v'3 Int32

                                num_segments

                                -> Tensor Build t

                                output: Has same shape as data, except for dimension 0 which + has size num_segments.

                                Computes the Max along segments of a tensor.

                                Read @{$math_ops#segmentation$the section on segmentation} for an explanation of + segments.

                                This operator is similar to the unsorted segment sum operator. + Instead of computing the sum over segments, it computes the maximum + such that:

                                \(output_i = max_j data_j\) where max is over j such + that `segment_ids[j] == i`.

                                If the maximum is empty for a given segment ID i, it outputs the smallest possible value for specific numeric type, + `output[i] = numeric_limitsT::min()`.

                                style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt + /div

                                unsortedSegmentMax' Source #

                                Arguments

                                :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                                => OpParams 
                                -> Tensor v'1 t

                                data

                                -> Tensor v'2 tindices

                                segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s + first dimension.

                                -> Tensor v'3 Int32

                                num_segments

                                -> Tensor Build t

                                output: Has same shape as data, except for dimension 0 which + has size num_segments.

                                unsortedSegmentSum Source #

                                Arguments

                                :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                                => Tensor v'1 t

                                data

                                -> Tensor v'2 tindices

                                segment_ids: A tensor whose shape is a prefix of `data.shape`.

                                -> Tensor v'3 Int32

                                num_segments

                                -> Tensor Build t

                                output: Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size - num_segments.

                                Computes the sum along segments of a tensor.

                                Read the section on - Segmentation for an explanation - of segments.

                                Computes a tensor such that + num_segments.

                                Computes the sum along segments of a tensor.

                                Read @{$math_ops#segmentation$the section on segmentation} for an explanation of + segments.

                                Computes a tensor such that `(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such that `segment_ids[j...] == i`. Unlike SegmentSum, segment_ids need not be sorted and need not cover all values in the full range of valid values.

                                If the sum is empty for a given segment ID i, `output[i] = 0`.

                                num_segments should equal the number of distinct segment IDs.

                                style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/UnsortedSegmentSum.png" alt - /div

                                unsortedSegmentSum'

                                Arguments

                                :: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
                                => OpParams 
                                -> Tensor v'1 t

                                data

                                -> Tensor v'2 tindices

                                segment_ids: A tensor whose shape is a prefix of `data.shape`.

                                -> Tensor v'3 Int32

                                num_segments

                                -> Tensor Build t

                                output: Has same shape as data, except for the first `segment_ids.rank` + style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt + /div

                                unsortedSegmentSum' Source #

                                Arguments

                                :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) 
                                => OpParams 
                                -> Tensor v'1 t

                                data

                                -> Tensor v'2 tindices

                                segment_ids: A tensor whose shape is a prefix of `data.shape`.

                                -> Tensor v'3 Int32

                                num_segments

                                -> Tensor Build t

                                output: Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size - num_segments.

                                unstage

                                Arguments

                                :: (MonadBuild m', TensorTypes dtypes) 
                                => m' (TensorList Value dtypes)

                                values

                                Op is similar to a lightweight Dequeue. The basic funtionality is similar to

                                dequeue with many fewer capabilities and options. This Op is optimized for - performance.

                                unstage'

                                Arguments

                                :: (MonadBuild m', TensorTypes dtypes) 
                                => OpParams 
                                -> m' (TensorList Value dtypes)

                                values

                                varHandleOp

                                Arguments

                                :: MonadBuild m' 
                                => DataType

                                dtype: the type of this variable. Must agree with the dtypes - of all ops using this variable.

                                -> Shape

                                shape: The (possibly partially specified) shape of this variable.

                                -> m' ResourceHandle

                                resource

                                Creates a handle to a Variable resource.

                                varHandleOp'

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> DataType

                                dtype: the type of this variable. Must agree with the dtypes - of all ops using this variable.

                                -> Shape

                                shape: The (possibly partially specified) shape of this variable.

                                -> m' ResourceHandle

                                resource

                                varIsInitializedOp

                                Arguments

                                :: MonadBuild m' 
                                => ResourceHandle

                                resource: the input resource handle.

                                -> m' (Tensor Value Bool)

                                is_initialized: a scalar boolean which is true if the variable has been - initialized.

                                Checks whether a resource handle-based variable has been initialized.

                                varIsInitializedOp'

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> ResourceHandle

                                resource: the input resource handle.

                                -> m' (Tensor Value Bool)

                                is_initialized: a scalar boolean which is true if the variable has been - initialized.

                                variable

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => Shape

                                shape

                                -> m' (Tensor Ref dtype)

                                ref

                                Use VariableV2 instead.

                                variable'

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => OpParams 
                                -> Shape

                                shape

                                -> m' (Tensor Ref dtype)

                                ref

                                variableV2

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => Shape

                                shape: The shape of the variable tensor.

                                -> m' (Tensor Ref dtype)

                                ref: A reference to the variable tensor.

                                Holds state in the form of a tensor that persists across steps.

                                Outputs a ref to the tensor state so it may be read or modified. + num_segments.

                                unstage Source #

                                Arguments

                                :: (MonadBuild m', TensorTypes dtypes) 
                                => m' (TensorList Value dtypes)

                                values

                                Op is similar to a lightweight Dequeue.

                                The basic functionality is similar to dequeue with many fewer + capabilities and options. This Op is optimized for performance.

                                unstage' Source #

                                Arguments

                                :: (MonadBuild m', TensorTypes dtypes) 
                                => OpParams 
                                -> m' (TensorList Value dtypes)

                                values

                                varHandleOp Source #

                                Arguments

                                :: MonadBuild m' 
                                => DataType

                                dtype: the type of this variable. Must agree with the dtypes + of all ops using this variable.

                                -> Shape

                                shape: The (possibly partially specified) shape of this variable.

                                -> m' (Tensor Value ResourceHandle)

                                resource

                                Creates a handle to a Variable resource.

                                varHandleOp' Source #

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> DataType

                                dtype: the type of this variable. Must agree with the dtypes + of all ops using this variable.

                                -> Shape

                                shape: The (possibly partially specified) shape of this variable.

                                -> m' (Tensor Value ResourceHandle)

                                resource

                                varIsInitializedOp Source #

                                Arguments

                                :: MonadBuild m' 
                                => Tensor v'1 ResourceHandle

                                resource: the input resource handle.

                                -> m' (Tensor Value Bool)

                                is_initialized: a scalar boolean which is true if the variable has been + initialized.

                                Checks whether a resource handle-based variable has been initialized.

                                varIsInitializedOp' Source #

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> Tensor v'1 ResourceHandle

                                resource: the input resource handle.

                                -> m' (Tensor Value Bool)

                                is_initialized: a scalar boolean which is true if the variable has been + initialized.

                                variable Source #

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => Shape

                                shape

                                -> m' (Tensor Ref dtype)

                                ref

                                Use VariableV2 instead.

                                variable' Source #

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => OpParams 
                                -> Shape

                                shape

                                -> m' (Tensor Ref dtype)

                                ref

                                variableV2 Source #

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => Shape

                                shape: The shape of the variable tensor.

                                -> m' (Tensor Ref dtype)

                                ref: A reference to the variable tensor.

                                Holds state in the form of a tensor that persists across steps.

                                Outputs a ref to the tensor state so it may be read or modified. TODO(zhifengc/mrry): Adds a pointer to a more detail document - about sharing states in tensorflow.

                                variableV2'

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => OpParams 
                                -> Shape

                                shape: The shape of the variable tensor.

                                -> m' (Tensor Ref dtype)

                                ref: A reference to the variable tensor.

                                where'

                                Arguments

                                :: Tensor v'1 Bool

                                input

                                -> Tensor Build Int64

                                index

                                Returns locations of true values in a boolean tensor.

                                This operation returns the coordinates of true elements in input. The + about sharing states in tensorflow.

                                variableV2' Source #

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => OpParams 
                                -> Shape

                                shape: The shape of the variable tensor.

                                -> m' (Tensor Ref dtype)

                                ref: A reference to the variable tensor.

                                where' Source #

                                Arguments

                                :: Tensor v'1 Bool

                                input

                                -> Tensor Build Int64

                                index

                                Returns locations of true values in a boolean tensor.

                                This operation returns the coordinates of true elements in input. The coordinates are returned in a 2-D tensor where the first dimension (rows) represents the number of true elements, and the second dimension (columns) represents the coordinates of the true elements. Keep in mind, the shape of the output tensor can vary depending on how many true values there are in - input. Indices are output in row-major order.

                                For example:

                                ```prettyprint + input. Indices are output in row-major order.

                                For example:

                                ``` # input tensor is [[True, False] # [True, False]] # input has two true values, so output has two coordinates. @@ -3638,19 +4197,18 @@ window.onload = function () {pageLoad();setSynopsis("mini_TensorFlow-GenOps-Core [1, 0, 1], [1, 1, 1], [2, 1, 1]] - ```

                                where''

                                Arguments

                                :: OpParams 
                                -> Tensor v'1 Bool

                                input

                                -> Tensor Build Int64

                                index

                                wholeFileReader

                                Arguments

                                :: MonadBuild m' 
                                => m' (Tensor Ref ByteString)

                                reader_handle: The handle to reference the Reader.

                                A Reader that outputs the entire contents of a file as a value.

                                To use, enqueue filenames in a Queue. The output of ReaderRead will - be a filename (key) and the contents of that file (value).

                                wholeFileReader'

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> m' (Tensor Ref ByteString)

                                reader_handle: The handle to reference the Reader.

                                wholeFileReaderV2

                                Arguments

                                :: MonadBuild m' 
                                => m' ResourceHandle

                                reader_handle: The handle to reference the Reader.

                                A Reader that outputs the entire contents of a file as a value.

                                To use, enqueue filenames in a Queue. The output of ReaderRead will - be a filename (key) and the contents of that file (value).

                                wholeFileReaderV2'

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> m' ResourceHandle

                                reader_handle: The handle to reference the Reader.

                                writeFile

                                Arguments

                                :: MonadBuild m' 
                                => Tensor v'1 ByteString

                                filename: scalar. The name of the file to which we write the contents.

                                -> Tensor v'2 ByteString

                                contents: scalar. The content to be written to the output file.

                                -> m' ControlNode 

                                Writes contents to the file at input filename. Creates file if not existing.

                                writeFile'

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> Tensor v'1 ByteString

                                filename: scalar. The name of the file to which we write the contents.

                                -> Tensor v'2 ByteString

                                contents: scalar. The content to be written to the output file.

                                -> m' ControlNode 

                                zerosLike

                                Arguments

                                :: TensorType t 
                                => Tensor v'1 t

                                x: a tensor of type T.

                                -> Tensor Build t

                                y: a tensor of the same shape and type as x but filled with zeros.

                                Returns a tensor of zeros with the same shape and type as x.

                                zerosLike'

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Tensor v'1 t

                                x: a tensor of type T.

                                -> Tensor Build t

                                y: a tensor of the same shape and type as x but filled with zeros.

                                zeta

                                Arguments

                                :: OneOf `[Double, Float]` t 
                                => Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                q

                                -> Tensor Build t

                                z

                                Compute the Hurwitz zeta function \(zeta(x, q)\).

                                The Hurwitz zeta function is defined as:

                                ``` - zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x} - ```

                                zeta'

                                Arguments

                                :: OneOf `[Double, Float]` t 
                                => OpParams 
                                -> Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                q

                                -> Tensor Build t

                                z

                                _Arg

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Int64

                                index: This argument is the index-th argument of the function.

                                -> m' (Tensor Value t)

                                output: The argument.

                                A graph node which represents an argument to a function.

                                _Arg'

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Int64

                                index: This argument is the index-th argument of the function.

                                -> m' (Tensor Value t)

                                output: The argument.

                                _ArrayToList

                                Arguments

                                :: (TensorType t, TensorTypes out_types) 
                                => [Tensor v'1 t]

                                input

                                -> TensorList Build out_types

                                output

                                Converts an array of tensors to a list of tensors.

                                _ArrayToList'

                                Arguments

                                :: (TensorType t, TensorTypes out_types) 
                                => OpParams 
                                -> [Tensor v'1 t]

                                input

                                -> TensorList Build out_types

                                output

                                _HostCast

                                Arguments

                                :: (TensorType srcT, TensorType dstT) 
                                => Tensor v'1 srcT

                                x

                                -> Tensor Build dstT

                                y

                                Cast x of type SrcT to y of DstT.

                                _HostCast requires its input and produces its output in host memory.

                                _HostCast'

                                Arguments

                                :: (TensorType srcT, TensorType dstT) 
                                => OpParams 
                                -> Tensor v'1 srcT

                                x

                                -> Tensor Build dstT

                                y

                                _HostRecv

                                Arguments

                                :: (MonadBuild m', TensorType tensor_type) 
                                => Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> m' (Tensor Value tensor_type)

                                tensor: The tensor to receive.

                                Receives the named tensor from send_device on recv_device.

                                _HostRecv requires its input on host memory whereas _Recv requires its - input on device memory.

                                _HostRecv'

                                Arguments

                                :: (MonadBuild m', TensorType tensor_type) 
                                => OpParams 
                                -> Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> m' (Tensor Value tensor_type)

                                tensor: The tensor to receive.

                                _HostSend

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> Tensor v'1 t

                                tensor: The tensor to send.

                                -> m' ControlNode 

                                Sends the named tensor from send_device to recv_device.

                                _HostSend requires its input on host memory whereas _Send requires its - input on device memory.

                                _HostSend'

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> Tensor v'1 t

                                tensor: The tensor to send.

                                -> m' ControlNode 

                                _ListToArray

                                Arguments

                                :: (TensorTypes tin, TensorType t) 
                                => Int64

                                N

                                -> TensorList v'1 tin

                                input

                                -> [Tensor Build t]

                                output

                                Converts a list of tensors to an array of tensors.

                                _ListToArray'

                                Arguments

                                :: (TensorTypes tin, TensorType t) 
                                => OpParams 
                                -> Int64

                                N

                                -> TensorList v'1 tin

                                input

                                -> [Tensor Build t]

                                output

                                _ParallelConcatStart

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => Shape

                                shape: 1-D Tensor indicating the shape of the output.

                                -> m' (Tensor Value dtype)

                                output: An empty Tensor of the specified type.

                                Creates an empty Tensor with shape shape and type dtype.

                                The memory can optionally be initialized. This is usually useful in - conjunction with inplace operations.

                                _ParallelConcatStart'

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => OpParams 
                                -> Shape

                                shape: 1-D Tensor indicating the shape of the output.

                                -> m' (Tensor Value dtype)

                                output: An empty Tensor of the specified type.

                                _ParallelConcatUpdate

                                Arguments

                                :: TensorType t 
                                => Int64

                                loc: A scalar indicating the index of the first dimension such that - value[loc, :] is updated.

                                -> Tensor v'1 t

                                value: A Tensor object that will be updated in-place.

                                -> Tensor v'2 t

                                update: A Tensor of rank one less than value if loc is a scalar, - otherwise of rank equal to value that contains the new values - for value.

                                -> Tensor Build t

                                output: value that has been updated accordingly.

                                Updates input value at loc with update.

                                If you use this function you will almost certainly want to add + ```

                                where'' Source #

                                Arguments

                                :: OpParams 
                                -> Tensor v'1 Bool

                                input

                                -> Tensor Build Int64

                                index

                                wholeFileReader Source #

                                Arguments

                                :: MonadBuild m' 
                                => m' (Tensor Ref ByteString)

                                reader_handle: The handle to reference the Reader.

                                A Reader that outputs the entire contents of a file as a value.

                                To use, enqueue filenames in a Queue. The output of ReaderRead will + be a filename (key) and the contents of that file (value).

                                wholeFileReader' Source #

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> m' (Tensor Ref ByteString)

                                reader_handle: The handle to reference the Reader.

                                wholeFileReaderV2 Source #

                                Arguments

                                :: MonadBuild m' 
                                => m' (Tensor Value ResourceHandle)

                                reader_handle: The handle to reference the Reader.

                                A Reader that outputs the entire contents of a file as a value.

                                To use, enqueue filenames in a Queue. The output of ReaderRead will + be a filename (key) and the contents of that file (value).

                                wholeFileReaderV2' Source #

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> m' (Tensor Value ResourceHandle)

                                reader_handle: The handle to reference the Reader.

                                writeFile Source #

                                Arguments

                                :: MonadBuild m' 
                                => Tensor v'1 ByteString

                                filename: scalar. The name of the file to which we write the contents.

                                -> Tensor v'2 ByteString

                                contents: scalar. The content to be written to the output file.

                                -> m' ControlNode 

                                Writes contents to the file at input filename. Creates file and recursively

                                creates directory if not existing.

                                writeFile' Source #

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> Tensor v'1 ByteString

                                filename: scalar. The name of the file to which we write the contents.

                                -> Tensor v'2 ByteString

                                contents: scalar. The content to be written to the output file.

                                -> m' ControlNode 

                                zerosLike Source #

                                Arguments

                                :: TensorType t 
                                => Tensor v'1 t

                                x: a tensor of type T.

                                -> Tensor Build t

                                y: a tensor of the same shape and type as x but filled with zeros.

                                Returns a tensor of zeros with the same shape and type as x.

                                zerosLike' Source #

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Tensor v'1 t

                                x: a tensor of type T.

                                -> Tensor Build t

                                y: a tensor of the same shape and type as x but filled with zeros.

                                zeta Source #

                                Arguments

                                :: OneOf '[Double, Float] t 
                                => Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                q

                                -> Tensor Build t

                                z

                                Compute the Hurwitz zeta function \(zeta(x, q)\).

                                The Hurwitz zeta function is defined as:

                                \(zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x}\)

                                zeta' Source #

                                Arguments

                                :: OneOf '[Double, Float] t 
                                => OpParams 
                                -> Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                q

                                -> Tensor Build t

                                z

                                zipDataset Source #

                                Arguments

                                :: MonadBuild m' 
                                => [DataType]

                                output_types

                                -> [Tensor v'1 ResourceHandle]

                                input_datasets

                                -> m' (Tensor Value ResourceHandle)

                                handle

                                Creates a dataset that zips together input_datasets.

                                zipDataset' Source #

                                Arguments

                                :: MonadBuild m' 
                                => OpParams 
                                -> [DataType]

                                output_types

                                -> [Tensor v'1 ResourceHandle]

                                input_datasets

                                -> m' (Tensor Value ResourceHandle)

                                handle

                                _Arg Source #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Int64

                                index: This argument is the index-th argument of the function.

                                -> m' (Tensor Value t)

                                output: The argument.

                                A graph node which represents an argument to a function.

                                _Arg' Source #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Int64

                                index: This argument is the index-th argument of the function.

                                -> m' (Tensor Value t)

                                output: The argument.

                                _ArrayToList Source #

                                Arguments

                                :: (TensorType t, TensorTypes out_types) 
                                => [Tensor v'1 t]

                                input

                                -> TensorList Build out_types

                                output

                                Converts an array of tensors to a list of tensors.

                                _ArrayToList' Source #

                                Arguments

                                :: (TensorType t, TensorTypes out_types) 
                                => OpParams 
                                -> [Tensor v'1 t]

                                input

                                -> TensorList Build out_types

                                output

                                _HostCast Source #

                                Arguments

                                :: (TensorType srcT, TensorType dstT) 
                                => Tensor v'1 srcT

                                x

                                -> Tensor Build dstT

                                y

                                Cast x of type SrcT to y of DstT.

                                _HostCast requires its input and produces its output in host memory.

                                _HostCast' Source #

                                Arguments

                                :: (TensorType srcT, TensorType dstT) 
                                => OpParams 
                                -> Tensor v'1 srcT

                                x

                                -> Tensor Build dstT

                                y

                                _HostRecv Source #

                                Arguments

                                :: (MonadBuild m', TensorType tensor_type) 
                                => Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> m' (Tensor Value tensor_type)

                                tensor: The tensor to receive.

                                Receives the named tensor from send_device on recv_device.

                                _HostRecv requires its input on host memory whereas _Recv requires its + input on device memory.

                                _HostRecv' Source #

                                Arguments

                                :: (MonadBuild m', TensorType tensor_type) 
                                => OpParams 
                                -> Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> m' (Tensor Value tensor_type)

                                tensor: The tensor to receive.

                                _HostSend Source #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> Tensor v'1 t

                                tensor: The tensor to send.

                                -> m' ControlNode 

                                Sends the named tensor from send_device to recv_device.

                                _HostSend requires its input on host memory whereas _Send requires its + input on device memory.

                                _HostSend' Source #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> Tensor v'1 t

                                tensor: The tensor to send.

                                -> m' ControlNode 

                                _ListToArray Source #

                                Arguments

                                :: (TensorTypes tin, TensorType t) 
                                => Int64

                                N

                                -> TensorList v'1 tin

                                input

                                -> [Tensor Build t]

                                output

                                Converts a list of tensors to an array of tensors.

                                _ListToArray' Source #

                                Arguments

                                :: (TensorTypes tin, TensorType t) 
                                => OpParams 
                                -> Int64

                                N

                                -> TensorList v'1 tin

                                input

                                -> [Tensor Build t]

                                output

                                _ParallelConcatStart Source #

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => Shape

                                shape: 1-D Tensor indicating the shape of the output.

                                -> m' (Tensor Value dtype)

                                output: An empty Tensor of the specified type.

                                Creates an empty Tensor with shape shape and type dtype.

                                The memory can optionally be initialized. This is usually useful in + conjunction with inplace operations.

                                _ParallelConcatStart' Source #

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => OpParams 
                                -> Shape

                                shape: 1-D Tensor indicating the shape of the output.

                                -> m' (Tensor Value dtype)

                                output: An empty Tensor of the specified type.

                                _ParallelConcatUpdate Source #

                                Arguments

                                :: TensorType t 
                                => Int64

                                loc: A scalar indicating the index of the first dimension such that + value[loc, :] is updated.

                                -> Tensor v'1 t

                                value: A Tensor object that will be updated in-place.

                                -> Tensor v'2 t

                                update: A Tensor of rank one less than value if loc is a scalar, + otherwise of rank equal to value that contains the new values + for value.

                                -> Tensor Build t

                                output: value that has been updated accordingly.

                                Updates input value at loc with update.

                                If you use this function you will almost certainly want to add a control dependency as done in the implementation of parallel_stack to - avoid race conditions.

                                _ParallelConcatUpdate'

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Int64

                                loc: A scalar indicating the index of the first dimension such that - value[loc, :] is updated.

                                -> Tensor v'1 t

                                value: A Tensor object that will be updated in-place.

                                -> Tensor v'2 t

                                update: A Tensor of rank one less than value if loc is a scalar, - otherwise of rank equal to value that contains the new values - for value.

                                -> Tensor Build t

                                output: value that has been updated accordingly.

                                _Recv

                                Arguments

                                :: (MonadBuild m', TensorType tensor_type) 
                                => Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> m' (Tensor Value tensor_type)

                                tensor: The tensor to receive.

                                Receives the named tensor from send_device on recv_device.

                                _Recv'

                                Arguments

                                :: (MonadBuild m', TensorType tensor_type) 
                                => OpParams 
                                -> Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> m' (Tensor Value tensor_type)

                                tensor: The tensor to receive.

                                _Retval

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Int64

                                index: This return value is the index-th return value of the function.

                                -> Tensor v'1 t

                                input: The return value.

                                -> m' ControlNode 

                                A graph node which represents a return value of a function.

                                _Retval'

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Int64

                                index: This return value is the index-th return value of the function.

                                -> Tensor v'1 t

                                input: The return value.

                                -> m' ControlNode 

                                _Send

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> Tensor v'1 t

                                tensor: The tensor to send.

                                -> m' ControlNode 

                                Sends the named tensor from send_device to recv_device.

                                _Send'

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> Tensor v'1 t

                                tensor: The tensor to send.

                                -> m' ControlNode 
                                \ No newline at end of file + avoid race conditions.

                                _ParallelConcatUpdate' Source #

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Int64

                                loc: A scalar indicating the index of the first dimension such that + value[loc, :] is updated.

                                -> Tensor v'1 t

                                value: A Tensor object that will be updated in-place.

                                -> Tensor v'2 t

                                update: A Tensor of rank one less than value if loc is a scalar, + otherwise of rank equal to value that contains the new values + for value.

                                -> Tensor Build t

                                output: value that has been updated accordingly.

                                _Recv Source #

                                Arguments

                                :: (MonadBuild m', TensorType tensor_type) 
                                => Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> m' (Tensor Value tensor_type)

                                tensor: The tensor to receive.

                                Receives the named tensor from send_device on recv_device.

                                _Recv' Source #

                                Arguments

                                :: (MonadBuild m', TensorType tensor_type) 
                                => OpParams 
                                -> Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> m' (Tensor Value tensor_type)

                                tensor: The tensor to receive.

                                _Retval Source #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Int64

                                index: This return value is the index-th return value of the function.

                                -> Tensor v'1 t

                                input: The return value.

                                -> m' ControlNode 

                                A graph node which represents a return value of a function.

                                _Retval' Source #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Int64

                                index: This return value is the index-th return value of the function.

                                -> Tensor v'1 t

                                input: The return value.

                                -> m' ControlNode 

                                _Send Source #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> Tensor v'1 t

                                tensor: The tensor to send.

                                -> m' ControlNode 

                                Sends the named tensor from send_device to recv_device.

                                _Send' Source #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Int64

                                send_device_incarnation: The current incarnation of send_device.

                                -> Tensor v'1 t

                                tensor: The tensor to send.

                                -> m' ControlNode 

                                _UnsafeReadVariable Source #

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => Tensor v'1 ResourceHandle

                                resource: handle to the resource in which to store the variable.

                                -> m' (Tensor Value dtype)

                                value

                                Reads the value of a variable without any memory model.

                                The tensor returned by this operation aliases a mutable Tensor, and its value + can be observed to be different by different ops.

                                Internal and private to the tensorflow implementation.

                                _UnsafeReadVariable' Source #

                                Arguments

                                :: (MonadBuild m', TensorType dtype) 
                                => OpParams 
                                -> Tensor v'1 ResourceHandle

                                resource: handle to the resource in which to store the variable.

                                -> m' (Tensor Value dtype)

                                value

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html index 47418de..2bf0f12 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - _)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html index 7a8b0a6..ecd73ca 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - A)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - A

                                abortTensorFlow.GenOps.Core
                                abort'TensorFlow.GenOps.Core
                                absTensorFlow.GenOps.Core
                                abs'TensorFlow.GenOps.Core
                                accumulatorApplyGradientTensorFlow.GenOps.Core
                                accumulatorApplyGradient'TensorFlow.GenOps.Core
                                accumulatorNumAccumulatedTensorFlow.GenOps.Core
                                accumulatorNumAccumulated'TensorFlow.GenOps.Core
                                accumulatorSetGlobalStepTensorFlow.GenOps.Core
                                accumulatorSetGlobalStep'TensorFlow.GenOps.Core
                                accumulatorTakeGradientTensorFlow.GenOps.Core
                                accumulatorTakeGradient'TensorFlow.GenOps.Core
                                acosTensorFlow.GenOps.Core
                                acos'TensorFlow.GenOps.Core
                                addTensorFlow.GenOps.Core
                                add'TensorFlow.GenOps.Core
                                addManySparseToTensorsMapTensorFlow.GenOps.Core
                                addManySparseToTensorsMap'TensorFlow.GenOps.Core
                                addNTensorFlow.GenOps.Core
                                addN'TensorFlow.GenOps.Core
                                addSparseToTensorsMapTensorFlow.GenOps.Core
                                addSparseToTensorsMap'TensorFlow.GenOps.Core
                                adjustContrastTensorFlow.GenOps.Core
                                adjustContrast'TensorFlow.GenOps.Core
                                adjustContrastv2TensorFlow.GenOps.Core
                                adjustContrastv2'TensorFlow.GenOps.Core
                                adjustHueTensorFlow.GenOps.Core
                                adjustHue'TensorFlow.GenOps.Core
                                adjustSaturationTensorFlow.GenOps.Core
                                adjustSaturation'TensorFlow.GenOps.Core
                                allTensorFlow.GenOps.Core
                                all'TensorFlow.GenOps.Core
                                allCandidateSamplerTensorFlow.GenOps.Core
                                allCandidateSampler'TensorFlow.GenOps.Core
                                anyTensorFlow.GenOps.Core
                                any'TensorFlow.GenOps.Core
                                applyAdadeltaTensorFlow.GenOps.Core
                                applyAdadelta'TensorFlow.GenOps.Core
                                applyAdagradTensorFlow.GenOps.Core
                                applyAdagrad'TensorFlow.GenOps.Core
                                applyAdagradDATensorFlow.GenOps.Core
                                applyAdagradDA'TensorFlow.GenOps.Core
                                applyAdamTensorFlow.GenOps.Core
                                applyAdam'TensorFlow.GenOps.Core
                                applyCenteredRMSPropTensorFlow.GenOps.Core
                                applyCenteredRMSProp'TensorFlow.GenOps.Core
                                applyFtrlTensorFlow.GenOps.Core
                                applyFtrl'TensorFlow.GenOps.Core
                                applyGradientDescentTensorFlow.GenOps.Core
                                applyGradientDescent'TensorFlow.GenOps.Core
                                applyMomentumTensorFlow.GenOps.Core
                                applyMomentum'TensorFlow.GenOps.Core
                                applyProximalAdagradTensorFlow.GenOps.Core
                                applyProximalAdagrad'TensorFlow.GenOps.Core
                                applyProximalGradientDescentTensorFlow.GenOps.Core
                                applyProximalGradientDescent'TensorFlow.GenOps.Core
                                applyRMSPropTensorFlow.GenOps.Core
                                applyRMSProp'TensorFlow.GenOps.Core
                                argMaxTensorFlow.GenOps.Core
                                argMax'TensorFlow.GenOps.Core
                                argMinTensorFlow.GenOps.Core
                                argMin'TensorFlow.GenOps.Core
                                asinTensorFlow.GenOps.Core
                                asin'TensorFlow.GenOps.Core
                                assertTensorFlow.GenOps.Core
                                assert'TensorFlow.GenOps.Core
                                assignTensorFlow.GenOps.Core
                                assign'TensorFlow.GenOps.Core
                                assignAddTensorFlow.GenOps.Core
                                assignAdd'TensorFlow.GenOps.Core
                                assignAddVariableOpTensorFlow.GenOps.Core
                                assignAddVariableOp'TensorFlow.GenOps.Core
                                assignSubTensorFlow.GenOps.Core
                                assignSub'TensorFlow.GenOps.Core
                                assignVariableOpTensorFlow.GenOps.Core
                                assignVariableOp'TensorFlow.GenOps.Core
                                asStringTensorFlow.GenOps.Core
                                asString'TensorFlow.GenOps.Core
                                atanTensorFlow.GenOps.Core
                                atan'TensorFlow.GenOps.Core
                                audioSummaryTensorFlow.GenOps.Core
                                audioSummary'TensorFlow.GenOps.Core
                                audioSummaryV2TensorFlow.GenOps.Core
                                audioSummaryV2'TensorFlow.GenOps.Core
                                avgPoolTensorFlow.GenOps.Core
                                avgPool'TensorFlow.GenOps.Core
                                avgPool3DTensorFlow.GenOps.Core
                                avgPool3D'TensorFlow.GenOps.Core
                                avgPool3DGradTensorFlow.GenOps.Core
                                avgPool3DGrad'TensorFlow.GenOps.Core
                                avgPoolGradTensorFlow.GenOps.Core
                                avgPoolGrad'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - A

                                abortTensorFlow.GenOps.Core
                                abort'TensorFlow.GenOps.Core
                                absTensorFlow.GenOps.Core
                                abs'TensorFlow.GenOps.Core
                                accumulatorApplyGradientTensorFlow.GenOps.Core
                                accumulatorApplyGradient'TensorFlow.GenOps.Core
                                accumulatorNumAccumulatedTensorFlow.GenOps.Core
                                accumulatorNumAccumulated'TensorFlow.GenOps.Core
                                accumulatorSetGlobalStepTensorFlow.GenOps.Core
                                accumulatorSetGlobalStep'TensorFlow.GenOps.Core
                                accumulatorTakeGradientTensorFlow.GenOps.Core
                                accumulatorTakeGradient'TensorFlow.GenOps.Core
                                acosTensorFlow.GenOps.Core
                                acos'TensorFlow.GenOps.Core
                                acoshTensorFlow.GenOps.Core
                                acosh'TensorFlow.GenOps.Core
                                addTensorFlow.GenOps.Core
                                add'TensorFlow.GenOps.Core
                                addManySparseToTensorsMapTensorFlow.GenOps.Core
                                addManySparseToTensorsMap'TensorFlow.GenOps.Core
                                addNTensorFlow.GenOps.Core
                                addN'TensorFlow.GenOps.Core
                                addSparseToTensorsMapTensorFlow.GenOps.Core
                                addSparseToTensorsMap'TensorFlow.GenOps.Core
                                adjustContrastTensorFlow.GenOps.Core
                                adjustContrast'TensorFlow.GenOps.Core
                                adjustContrastv2TensorFlow.GenOps.Core
                                adjustContrastv2'TensorFlow.GenOps.Core
                                adjustHueTensorFlow.GenOps.Core
                                adjustHue'TensorFlow.GenOps.Core
                                adjustSaturationTensorFlow.GenOps.Core
                                adjustSaturation'TensorFlow.GenOps.Core
                                allTensorFlow.GenOps.Core
                                all'TensorFlow.GenOps.Core
                                allCandidateSamplerTensorFlow.GenOps.Core
                                allCandidateSampler'TensorFlow.GenOps.Core
                                anyTensorFlow.GenOps.Core
                                any'TensorFlow.GenOps.Core
                                applyAdadeltaTensorFlow.GenOps.Core
                                applyAdadelta'TensorFlow.GenOps.Core
                                applyAdagradTensorFlow.GenOps.Core
                                applyAdagrad'TensorFlow.GenOps.Core
                                applyAdagradDATensorFlow.GenOps.Core
                                applyAdagradDA'TensorFlow.GenOps.Core
                                applyAdamTensorFlow.GenOps.Core
                                applyAdam'TensorFlow.GenOps.Core
                                applyCenteredRMSPropTensorFlow.GenOps.Core
                                applyCenteredRMSProp'TensorFlow.GenOps.Core
                                applyDelayCompensatedGradientDescentTensorFlow.GenOps.Core
                                applyDelayCompensatedGradientDescent'TensorFlow.GenOps.Core
                                applyFtrlTensorFlow.GenOps.Core
                                applyFtrl'TensorFlow.GenOps.Core
                                applyFtrlV2TensorFlow.GenOps.Core
                                applyFtrlV2'TensorFlow.GenOps.Core
                                applyGradientDescentTensorFlow.GenOps.Core
                                applyGradientDescent'TensorFlow.GenOps.Core
                                applyMomentumTensorFlow.GenOps.Core
                                applyMomentum'TensorFlow.GenOps.Core
                                applyProximalAdagradTensorFlow.GenOps.Core
                                applyProximalAdagrad'TensorFlow.GenOps.Core
                                applyProximalGradientDescentTensorFlow.GenOps.Core
                                applyProximalGradientDescent'TensorFlow.GenOps.Core
                                applyRMSPropTensorFlow.GenOps.Core
                                applyRMSProp'TensorFlow.GenOps.Core
                                approximateEqualTensorFlow.GenOps.Core
                                approximateEqual'TensorFlow.GenOps.Core
                                argMaxTensorFlow.GenOps.Core
                                argMax'TensorFlow.GenOps.Core
                                argMinTensorFlow.GenOps.Core
                                argMin'TensorFlow.GenOps.Core
                                asinTensorFlow.GenOps.Core
                                asin'TensorFlow.GenOps.Core
                                asinhTensorFlow.GenOps.Core
                                asinh'TensorFlow.GenOps.Core
                                assertTensorFlow.GenOps.Core
                                assert'TensorFlow.GenOps.Core
                                assignTensorFlow.GenOps.Core
                                assign'TensorFlow.GenOps.Core
                                assignAddTensorFlow.GenOps.Core
                                assignAdd'TensorFlow.GenOps.Core
                                assignAddVariableOpTensorFlow.GenOps.Core
                                assignAddVariableOp'TensorFlow.GenOps.Core
                                assignSubTensorFlow.GenOps.Core
                                assignSub'TensorFlow.GenOps.Core
                                assignSubVariableOpTensorFlow.GenOps.Core
                                assignSubVariableOp'TensorFlow.GenOps.Core
                                assignVariableOpTensorFlow.GenOps.Core
                                assignVariableOp'TensorFlow.GenOps.Core
                                asStringTensorFlow.GenOps.Core
                                asString'TensorFlow.GenOps.Core
                                atanTensorFlow.GenOps.Core
                                atan'TensorFlow.GenOps.Core
                                atan2TensorFlow.GenOps.Core
                                atan2'TensorFlow.GenOps.Core
                                atanhTensorFlow.GenOps.Core
                                atanh'TensorFlow.GenOps.Core
                                audioSpectrogramTensorFlow.GenOps.Core
                                audioSpectrogram'TensorFlow.GenOps.Core
                                audioSummaryTensorFlow.GenOps.Core
                                audioSummary'TensorFlow.GenOps.Core
                                audioSummaryV2TensorFlow.GenOps.Core
                                audioSummaryV2'TensorFlow.GenOps.Core
                                avgPoolTensorFlow.GenOps.Core
                                avgPool'TensorFlow.GenOps.Core
                                avgPool3DTensorFlow.GenOps.Core
                                avgPool3D'TensorFlow.GenOps.Core
                                avgPool3DGradTensorFlow.GenOps.Core
                                avgPool3DGrad'TensorFlow.GenOps.Core
                                avgPoolGradTensorFlow.GenOps.Core
                                avgPoolGrad'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html index ed2064d..f28755a 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index

                                abortTensorFlow.GenOps.Core
                                abort'TensorFlow.GenOps.Core
                                absTensorFlow.GenOps.Core
                                abs'TensorFlow.GenOps.Core
                                accumulatorApplyGradientTensorFlow.GenOps.Core
                                accumulatorApplyGradient'TensorFlow.GenOps.Core
                                accumulatorNumAccumulatedTensorFlow.GenOps.Core
                                accumulatorNumAccumulated'TensorFlow.GenOps.Core
                                accumulatorSetGlobalStepTensorFlow.GenOps.Core
                                accumulatorSetGlobalStep'TensorFlow.GenOps.Core
                                accumulatorTakeGradientTensorFlow.GenOps.Core
                                accumulatorTakeGradient'TensorFlow.GenOps.Core
                                acosTensorFlow.GenOps.Core
                                acos'TensorFlow.GenOps.Core
                                addTensorFlow.GenOps.Core
                                add'TensorFlow.GenOps.Core
                                addManySparseToTensorsMapTensorFlow.GenOps.Core
                                addManySparseToTensorsMap'TensorFlow.GenOps.Core
                                addNTensorFlow.GenOps.Core
                                addN'TensorFlow.GenOps.Core
                                addSparseToTensorsMapTensorFlow.GenOps.Core
                                addSparseToTensorsMap'TensorFlow.GenOps.Core
                                adjustContrastTensorFlow.GenOps.Core
                                adjustContrast'TensorFlow.GenOps.Core
                                adjustContrastv2TensorFlow.GenOps.Core
                                adjustContrastv2'TensorFlow.GenOps.Core
                                adjustHueTensorFlow.GenOps.Core
                                adjustHue'TensorFlow.GenOps.Core
                                adjustSaturationTensorFlow.GenOps.Core
                                adjustSaturation'TensorFlow.GenOps.Core
                                allTensorFlow.GenOps.Core
                                all'TensorFlow.GenOps.Core
                                allCandidateSamplerTensorFlow.GenOps.Core
                                allCandidateSampler'TensorFlow.GenOps.Core
                                anyTensorFlow.GenOps.Core
                                any'TensorFlow.GenOps.Core
                                applyAdadeltaTensorFlow.GenOps.Core
                                applyAdadelta'TensorFlow.GenOps.Core
                                applyAdagradTensorFlow.GenOps.Core
                                applyAdagrad'TensorFlow.GenOps.Core
                                applyAdagradDATensorFlow.GenOps.Core
                                applyAdagradDA'TensorFlow.GenOps.Core
                                applyAdamTensorFlow.GenOps.Core
                                applyAdam'TensorFlow.GenOps.Core
                                applyCenteredRMSPropTensorFlow.GenOps.Core
                                applyCenteredRMSProp'TensorFlow.GenOps.Core
                                applyFtrlTensorFlow.GenOps.Core
                                applyFtrl'TensorFlow.GenOps.Core
                                applyGradientDescentTensorFlow.GenOps.Core
                                applyGradientDescent'TensorFlow.GenOps.Core
                                applyMomentumTensorFlow.GenOps.Core
                                applyMomentum'TensorFlow.GenOps.Core
                                applyProximalAdagradTensorFlow.GenOps.Core
                                applyProximalAdagrad'TensorFlow.GenOps.Core
                                applyProximalGradientDescentTensorFlow.GenOps.Core
                                applyProximalGradientDescent'TensorFlow.GenOps.Core
                                applyRMSPropTensorFlow.GenOps.Core
                                applyRMSProp'TensorFlow.GenOps.Core
                                argMaxTensorFlow.GenOps.Core
                                argMax'TensorFlow.GenOps.Core
                                argMinTensorFlow.GenOps.Core
                                argMin'TensorFlow.GenOps.Core
                                asinTensorFlow.GenOps.Core
                                asin'TensorFlow.GenOps.Core
                                assertTensorFlow.GenOps.Core
                                assert'TensorFlow.GenOps.Core
                                assignTensorFlow.GenOps.Core
                                assign'TensorFlow.GenOps.Core
                                assignAddTensorFlow.GenOps.Core
                                assignAdd'TensorFlow.GenOps.Core
                                assignAddVariableOpTensorFlow.GenOps.Core
                                assignAddVariableOp'TensorFlow.GenOps.Core
                                assignSubTensorFlow.GenOps.Core
                                assignSub'TensorFlow.GenOps.Core
                                assignVariableOpTensorFlow.GenOps.Core
                                assignVariableOp'TensorFlow.GenOps.Core
                                asStringTensorFlow.GenOps.Core
                                asString'TensorFlow.GenOps.Core
                                atanTensorFlow.GenOps.Core
                                atan'TensorFlow.GenOps.Core
                                audioSummaryTensorFlow.GenOps.Core
                                audioSummary'TensorFlow.GenOps.Core
                                audioSummaryV2TensorFlow.GenOps.Core
                                audioSummaryV2'TensorFlow.GenOps.Core
                                avgPoolTensorFlow.GenOps.Core
                                avgPool'TensorFlow.GenOps.Core
                                avgPool3DTensorFlow.GenOps.Core
                                avgPool3D'TensorFlow.GenOps.Core
                                avgPool3DGradTensorFlow.GenOps.Core
                                avgPool3DGrad'TensorFlow.GenOps.Core
                                avgPoolGradTensorFlow.GenOps.Core
                                avgPoolGrad'TensorFlow.GenOps.Core
                                barrierTensorFlow.GenOps.Core
                                barrier'TensorFlow.GenOps.Core
                                barrierCloseTensorFlow.GenOps.Core
                                barrierClose'TensorFlow.GenOps.Core
                                barrierIncompleteSizeTensorFlow.GenOps.Core
                                barrierIncompleteSize'TensorFlow.GenOps.Core
                                barrierInsertManyTensorFlow.GenOps.Core
                                barrierInsertMany'TensorFlow.GenOps.Core
                                barrierReadySizeTensorFlow.GenOps.Core
                                barrierReadySize'TensorFlow.GenOps.Core
                                barrierTakeManyTensorFlow.GenOps.Core
                                barrierTakeMany'TensorFlow.GenOps.Core
                                batchCholeskyTensorFlow.GenOps.Core
                                batchCholesky'TensorFlow.GenOps.Core
                                batchCholeskyGradTensorFlow.GenOps.Core
                                batchCholeskyGrad'TensorFlow.GenOps.Core
                                batchFFTTensorFlow.GenOps.Core
                                batchFFT'TensorFlow.GenOps.Core
                                batchFFT2DTensorFlow.GenOps.Core
                                batchFFT2D'TensorFlow.GenOps.Core
                                batchFFT3DTensorFlow.GenOps.Core
                                batchFFT3D'TensorFlow.GenOps.Core
                                batchIFFTTensorFlow.GenOps.Core
                                batchIFFT'TensorFlow.GenOps.Core
                                batchIFFT2DTensorFlow.GenOps.Core
                                batchIFFT2D'TensorFlow.GenOps.Core
                                batchIFFT3DTensorFlow.GenOps.Core
                                batchIFFT3D'TensorFlow.GenOps.Core
                                batchMatMulTensorFlow.GenOps.Core
                                batchMatMul'TensorFlow.GenOps.Core
                                batchMatrixBandPartTensorFlow.GenOps.Core
                                batchMatrixBandPart'TensorFlow.GenOps.Core
                                batchMatrixDeterminantTensorFlow.GenOps.Core
                                batchMatrixDeterminant'TensorFlow.GenOps.Core
                                batchMatrixDiagTensorFlow.GenOps.Core
                                batchMatrixDiag'TensorFlow.GenOps.Core
                                batchMatrixDiagPartTensorFlow.GenOps.Core
                                batchMatrixDiagPart'TensorFlow.GenOps.Core
                                batchMatrixInverseTensorFlow.GenOps.Core
                                batchMatrixInverse'TensorFlow.GenOps.Core
                                batchMatrixSetDiagTensorFlow.GenOps.Core
                                batchMatrixSetDiag'TensorFlow.GenOps.Core
                                batchMatrixSolveTensorFlow.GenOps.Core
                                batchMatrixSolve'TensorFlow.GenOps.Core
                                batchMatrixSolveLsTensorFlow.GenOps.Core
                                batchMatrixSolveLs'TensorFlow.GenOps.Core
                                batchMatrixTriangularSolveTensorFlow.GenOps.Core
                                batchMatrixTriangularSolve'TensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
                                batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
                                batchSelfAdjointEigTensorFlow.GenOps.Core
                                batchSelfAdjointEig'TensorFlow.GenOps.Core
                                batchSelfAdjointEigV2TensorFlow.GenOps.Core
                                batchSelfAdjointEigV2'TensorFlow.GenOps.Core
                                batchSvdTensorFlow.GenOps.Core
                                batchSvd'TensorFlow.GenOps.Core
                                batchToSpaceTensorFlow.GenOps.Core
                                batchToSpace'TensorFlow.GenOps.Core
                                batchToSpaceNDTensorFlow.GenOps.Core
                                batchToSpaceND'TensorFlow.GenOps.Core
                                betaincTensorFlow.GenOps.Core
                                betainc'TensorFlow.GenOps.Core
                                biasAddTensorFlow.GenOps.Core
                                biasAdd'TensorFlow.GenOps.Core
                                biasAddGradTensorFlow.GenOps.Core
                                biasAddGrad'TensorFlow.GenOps.Core
                                biasAddV1TensorFlow.GenOps.Core
                                biasAddV1'TensorFlow.GenOps.Core
                                bitcastTensorFlow.GenOps.Core
                                bitcast'TensorFlow.GenOps.Core
                                broadcastArgsTensorFlow.GenOps.Core
                                broadcastArgs'TensorFlow.GenOps.Core
                                broadcastGradientArgsTensorFlow.GenOps.Core
                                broadcastGradientArgs'TensorFlow.GenOps.Core
                                castTensorFlow.GenOps.Core
                                cast'TensorFlow.GenOps.Core
                                ceilTensorFlow.GenOps.Core
                                ceil'TensorFlow.GenOps.Core
                                checkNumericsTensorFlow.GenOps.Core
                                checkNumerics'TensorFlow.GenOps.Core
                                choleskyTensorFlow.GenOps.Core
                                cholesky'TensorFlow.GenOps.Core
                                choleskyGradTensorFlow.GenOps.Core
                                choleskyGrad'TensorFlow.GenOps.Core
                                complexTensorFlow.GenOps.Core
                                complex'TensorFlow.GenOps.Core
                                complexAbsTensorFlow.GenOps.Core
                                complexAbs'TensorFlow.GenOps.Core
                                computeAccidentalHitsTensorFlow.GenOps.Core
                                computeAccidentalHits'TensorFlow.GenOps.Core
                                concatTensorFlow.GenOps.Core
                                concat'TensorFlow.GenOps.Core
                                concatOffsetTensorFlow.GenOps.Core
                                concatOffset'TensorFlow.GenOps.Core
                                concatV2TensorFlow.GenOps.Core
                                concatV2'TensorFlow.GenOps.Core
                                conditionalAccumulatorTensorFlow.GenOps.Core
                                conditionalAccumulator'TensorFlow.GenOps.Core
                                conjTensorFlow.GenOps.Core
                                conj'TensorFlow.GenOps.Core
                                constTensorFlow.GenOps.Core
                                const'TensorFlow.GenOps.Core
                                controlTriggerTensorFlow.GenOps.Core
                                controlTrigger'TensorFlow.GenOps.Core
                                conv2DTensorFlow.GenOps.Core
                                conv2D'TensorFlow.GenOps.Core
                                conv2DBackpropFilterTensorFlow.GenOps.Core
                                conv2DBackpropFilter'TensorFlow.GenOps.Core
                                conv2DBackpropInputTensorFlow.GenOps.Core
                                conv2DBackpropInput'TensorFlow.GenOps.Core
                                conv3DTensorFlow.GenOps.Core
                                conv3D'TensorFlow.GenOps.Core
                                conv3DBackpropFilterTensorFlow.GenOps.Core
                                conv3DBackpropFilter'TensorFlow.GenOps.Core
                                conv3DBackpropFilterV2TensorFlow.GenOps.Core
                                conv3DBackpropFilterV2'TensorFlow.GenOps.Core
                                conv3DBackpropInputTensorFlow.GenOps.Core
                                conv3DBackpropInput'TensorFlow.GenOps.Core
                                conv3DBackpropInputV2TensorFlow.GenOps.Core
                                conv3DBackpropInputV2'TensorFlow.GenOps.Core
                                copyTensorFlow.GenOps.Core
                                copy'TensorFlow.GenOps.Core
                                copyHostTensorFlow.GenOps.Core
                                copyHost'TensorFlow.GenOps.Core
                                cosTensorFlow.GenOps.Core
                                cos'TensorFlow.GenOps.Core
                                countUpToTensorFlow.GenOps.Core
                                countUpTo'TensorFlow.GenOps.Core
                                cropAndResizeTensorFlow.GenOps.Core
                                cropAndResize'TensorFlow.GenOps.Core
                                cropAndResizeGradBoxesTensorFlow.GenOps.Core
                                cropAndResizeGradBoxes'TensorFlow.GenOps.Core
                                cropAndResizeGradImageTensorFlow.GenOps.Core
                                cropAndResizeGradImage'TensorFlow.GenOps.Core
                                crossTensorFlow.GenOps.Core
                                cross'TensorFlow.GenOps.Core
                                cTCBeamSearchDecoderTensorFlow.GenOps.Core
                                cTCBeamSearchDecoder'TensorFlow.GenOps.Core
                                cTCGreedyDecoderTensorFlow.GenOps.Core
                                cTCGreedyDecoder'TensorFlow.GenOps.Core
                                cTCLossTensorFlow.GenOps.Core
                                cTCLoss'TensorFlow.GenOps.Core
                                cumprodTensorFlow.GenOps.Core
                                cumprod'TensorFlow.GenOps.Core
                                cumsumTensorFlow.GenOps.Core
                                cumsum'TensorFlow.GenOps.Core
                                debugIdentityTensorFlow.GenOps.Core
                                debugIdentity'TensorFlow.GenOps.Core
                                debugNanCountTensorFlow.GenOps.Core
                                debugNanCount'TensorFlow.GenOps.Core
                                debugNumericSummaryTensorFlow.GenOps.Core
                                debugNumericSummary'TensorFlow.GenOps.Core
                                decodeBase64TensorFlow.GenOps.Core
                                decodeBase64'TensorFlow.GenOps.Core
                                decodeCSVTensorFlow.GenOps.Core
                                decodeCSV'TensorFlow.GenOps.Core
                                decodeGifTensorFlow.GenOps.Core
                                decodeGif'TensorFlow.GenOps.Core
                                decodeJpegTensorFlow.GenOps.Core
                                decodeJpeg'TensorFlow.GenOps.Core
                                decodeJSONExampleTensorFlow.GenOps.Core
                                decodeJSONExample'TensorFlow.GenOps.Core
                                decodePngTensorFlow.GenOps.Core
                                decodePng'TensorFlow.GenOps.Core
                                decodeRawTensorFlow.GenOps.Core
                                decodeRaw'TensorFlow.GenOps.Core
                                deleteSessionTensorTensorFlow.GenOps.Core
                                deleteSessionTensor'TensorFlow.GenOps.Core
                                denseToDenseSetOperationTensorFlow.GenOps.Core
                                denseToDenseSetOperation'TensorFlow.GenOps.Core
                                denseToSparseSetOperationTensorFlow.GenOps.Core
                                denseToSparseSetOperation'TensorFlow.GenOps.Core
                                depthToSpaceTensorFlow.GenOps.Core
                                depthToSpace'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeTensorFlow.GenOps.Core
                                depthwiseConv2dNative'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
                                dequantizeTensorFlow.GenOps.Core
                                dequantize'TensorFlow.GenOps.Core
                                deserializeManySparseTensorFlow.GenOps.Core
                                deserializeManySparse'TensorFlow.GenOps.Core
                                destroyTemporaryVariableTensorFlow.GenOps.Core
                                destroyTemporaryVariable'TensorFlow.GenOps.Core
                                diagTensorFlow.GenOps.Core
                                diag'TensorFlow.GenOps.Core
                                diagPartTensorFlow.GenOps.Core
                                diagPart'TensorFlow.GenOps.Core
                                digammaTensorFlow.GenOps.Core
                                digamma'TensorFlow.GenOps.Core
                                dilation2DTensorFlow.GenOps.Core
                                dilation2D'TensorFlow.GenOps.Core
                                dilation2DBackpropFilterTensorFlow.GenOps.Core
                                dilation2DBackpropFilter'TensorFlow.GenOps.Core
                                dilation2DBackpropInputTensorFlow.GenOps.Core
                                dilation2DBackpropInput'TensorFlow.GenOps.Core
                                divTensorFlow.GenOps.Core
                                div'TensorFlow.GenOps.Core
                                drawBoundingBoxesTensorFlow.GenOps.Core
                                drawBoundingBoxes'TensorFlow.GenOps.Core
                                dynamicPartitionTensorFlow.GenOps.Core
                                dynamicPartition'TensorFlow.GenOps.Core
                                dynamicStitchTensorFlow.GenOps.Core
                                dynamicStitch'TensorFlow.GenOps.Core
                                editDistanceTensorFlow.GenOps.Core
                                editDistance'TensorFlow.GenOps.Core
                                eluTensorFlow.GenOps.Core
                                elu'TensorFlow.GenOps.Core
                                eluGradTensorFlow.GenOps.Core
                                eluGrad'TensorFlow.GenOps.Core
                                encodeBase64TensorFlow.GenOps.Core
                                encodeBase64'TensorFlow.GenOps.Core
                                encodeJpegTensorFlow.GenOps.Core
                                encodeJpeg'TensorFlow.GenOps.Core
                                encodePngTensorFlow.GenOps.Core
                                encodePng'TensorFlow.GenOps.Core
                                enterTensorFlow.GenOps.Core
                                enter'TensorFlow.GenOps.Core
                                equalTensorFlow.GenOps.Core
                                equal'TensorFlow.GenOps.Core
                                erfTensorFlow.GenOps.Core
                                erf'TensorFlow.GenOps.Core
                                erfcTensorFlow.GenOps.Core
                                erfc'TensorFlow.GenOps.Core
                                exitTensorFlow.GenOps.Core
                                exit'TensorFlow.GenOps.Core
                                expTensorFlow.GenOps.Core
                                exp'TensorFlow.GenOps.Core
                                expandDimsTensorFlow.GenOps.Core
                                expandDims'TensorFlow.GenOps.Core
                                expm1TensorFlow.GenOps.Core
                                expm1'TensorFlow.GenOps.Core
                                extractGlimpseTensorFlow.GenOps.Core
                                extractGlimpse'TensorFlow.GenOps.Core
                                extractImagePatchesTensorFlow.GenOps.Core
                                extractImagePatches'TensorFlow.GenOps.Core
                                factTensorFlow.GenOps.Core
                                fact'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
                                fakeQueueTensorFlow.GenOps.Core
                                fakeQueue'TensorFlow.GenOps.Core
                                fFTTensorFlow.GenOps.Core
                                fFT'TensorFlow.GenOps.Core
                                fFT2DTensorFlow.GenOps.Core
                                fFT2D'TensorFlow.GenOps.Core
                                fFT3DTensorFlow.GenOps.Core
                                fFT3D'TensorFlow.GenOps.Core
                                fIFOQueueTensorFlow.GenOps.Core
                                fIFOQueue'TensorFlow.GenOps.Core
                                fIFOQueueV2TensorFlow.GenOps.Core
                                fIFOQueueV2'TensorFlow.GenOps.Core
                                fillTensorFlow.GenOps.Core
                                fill'TensorFlow.GenOps.Core
                                fixedLengthRecordReaderTensorFlow.GenOps.Core
                                fixedLengthRecordReader'TensorFlow.GenOps.Core
                                fixedLengthRecordReaderV2TensorFlow.GenOps.Core
                                fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
                                fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
                                fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
                                floorTensorFlow.GenOps.Core
                                floor'TensorFlow.GenOps.Core
                                floorDivTensorFlow.GenOps.Core
                                floorDiv'TensorFlow.GenOps.Core
                                floorModTensorFlow.GenOps.Core
                                floorMod'TensorFlow.GenOps.Core
                                fractionalAvgPoolTensorFlow.GenOps.Core
                                fractionalAvgPool'TensorFlow.GenOps.Core
                                fractionalAvgPoolGradTensorFlow.GenOps.Core
                                fractionalAvgPoolGrad'TensorFlow.GenOps.Core
                                fractionalMaxPoolTensorFlow.GenOps.Core
                                fractionalMaxPool'TensorFlow.GenOps.Core
                                fractionalMaxPoolGradTensorFlow.GenOps.Core
                                fractionalMaxPoolGrad'TensorFlow.GenOps.Core
                                fusedBatchNormTensorFlow.GenOps.Core
                                fusedBatchNorm'TensorFlow.GenOps.Core
                                fusedBatchNormGradTensorFlow.GenOps.Core
                                fusedBatchNormGrad'TensorFlow.GenOps.Core
                                fusedPadConv2DTensorFlow.GenOps.Core
                                fusedPadConv2D'TensorFlow.GenOps.Core
                                fusedResizeAndPadConv2DTensorFlow.GenOps.Core
                                fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
                                gatherTensorFlow.GenOps.Core
                                gather'TensorFlow.GenOps.Core
                                gatherNdTensorFlow.GenOps.Core
                                gatherNd'TensorFlow.GenOps.Core
                                getSessionHandleTensorFlow.GenOps.Core
                                getSessionHandle'TensorFlow.GenOps.Core
                                getSessionTensorTensorFlow.GenOps.Core
                                getSessionTensor'TensorFlow.GenOps.Core
                                greaterTensorFlow.GenOps.Core
                                greater'TensorFlow.GenOps.Core
                                greaterEqualTensorFlow.GenOps.Core
                                greaterEqual'TensorFlow.GenOps.Core
                                hashTableTensorFlow.GenOps.Core
                                hashTable'TensorFlow.GenOps.Core
                                histogramSummaryTensorFlow.GenOps.Core
                                histogramSummary'TensorFlow.GenOps.Core
                                hSVToRGBTensorFlow.GenOps.Core
                                hSVToRGB'TensorFlow.GenOps.Core
                                identityTensorFlow.GenOps.Core
                                identity'TensorFlow.GenOps.Core
                                identityReaderTensorFlow.GenOps.Core
                                identityReader'TensorFlow.GenOps.Core
                                identityReaderV2TensorFlow.GenOps.Core
                                identityReaderV2'TensorFlow.GenOps.Core
                                iFFTTensorFlow.GenOps.Core
                                iFFT'TensorFlow.GenOps.Core
                                iFFT2DTensorFlow.GenOps.Core
                                iFFT2D'TensorFlow.GenOps.Core
                                iFFT3DTensorFlow.GenOps.Core
                                iFFT3D'TensorFlow.GenOps.Core
                                igammaTensorFlow.GenOps.Core
                                igamma'TensorFlow.GenOps.Core
                                igammacTensorFlow.GenOps.Core
                                igammac'TensorFlow.GenOps.Core
                                imagTensorFlow.GenOps.Core
                                imag'TensorFlow.GenOps.Core
                                imageSummaryTensorFlow.GenOps.Core
                                imageSummary'TensorFlow.GenOps.Core
                                immutableConstTensorFlow.GenOps.Core
                                immutableConst'TensorFlow.GenOps.Core
                                initializeTableTensorFlow.GenOps.Core
                                initializeTable'TensorFlow.GenOps.Core
                                initializeTableFromTextFileTensorFlow.GenOps.Core
                                initializeTableFromTextFile'TensorFlow.GenOps.Core
                                inTopKTensorFlow.GenOps.Core
                                inTopK'TensorFlow.GenOps.Core
                                invTensorFlow.GenOps.Core
                                inv'TensorFlow.GenOps.Core
                                invertPermutationTensorFlow.GenOps.Core
                                invertPermutation'TensorFlow.GenOps.Core
                                invGradTensorFlow.GenOps.Core
                                invGrad'TensorFlow.GenOps.Core
                                isFiniteTensorFlow.GenOps.Core
                                isFinite'TensorFlow.GenOps.Core
                                isInfTensorFlow.GenOps.Core
                                isInf'TensorFlow.GenOps.Core
                                isNanTensorFlow.GenOps.Core
                                isNan'TensorFlow.GenOps.Core
                                isVariableInitializedTensorFlow.GenOps.Core
                                isVariableInitialized'TensorFlow.GenOps.Core
                                l2LossTensorFlow.GenOps.Core
                                l2Loss'TensorFlow.GenOps.Core
                                learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
                                learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
                                lessTensorFlow.GenOps.Core
                                less'TensorFlow.GenOps.Core
                                lessEqualTensorFlow.GenOps.Core
                                lessEqual'TensorFlow.GenOps.Core
                                lgammaTensorFlow.GenOps.Core
                                lgamma'TensorFlow.GenOps.Core
                                linSpaceTensorFlow.GenOps.Core
                                linSpace'TensorFlow.GenOps.Core
                                listDiffTensorFlow.GenOps.Core
                                listDiff'TensorFlow.GenOps.Core
                                logTensorFlow.GenOps.Core
                                log'TensorFlow.GenOps.Core
                                log1pTensorFlow.GenOps.Core
                                log1p'TensorFlow.GenOps.Core
                                logicalAndTensorFlow.GenOps.Core
                                logicalAnd'TensorFlow.GenOps.Core
                                logicalNotTensorFlow.GenOps.Core
                                logicalNot'TensorFlow.GenOps.Core
                                logicalOrTensorFlow.GenOps.Core
                                logicalOr'TensorFlow.GenOps.Core
                                logSoftmaxTensorFlow.GenOps.Core
                                logSoftmax'TensorFlow.GenOps.Core
                                logUniformCandidateSamplerTensorFlow.GenOps.Core
                                logUniformCandidateSampler'TensorFlow.GenOps.Core
                                lookupTableExportTensorFlow.GenOps.Core
                                lookupTableExport'TensorFlow.GenOps.Core
                                lookupTableFindTensorFlow.GenOps.Core
                                lookupTableFind'TensorFlow.GenOps.Core
                                lookupTableImportTensorFlow.GenOps.Core
                                lookupTableImport'TensorFlow.GenOps.Core
                                lookupTableInsertTensorFlow.GenOps.Core
                                lookupTableInsert'TensorFlow.GenOps.Core
                                lookupTableSizeTensorFlow.GenOps.Core
                                lookupTableSize'TensorFlow.GenOps.Core
                                loopCondTensorFlow.GenOps.Core
                                loopCond'TensorFlow.GenOps.Core
                                lRNTensorFlow.GenOps.Core
                                lRN'TensorFlow.GenOps.Core
                                lRNGradTensorFlow.GenOps.Core
                                lRNGrad'TensorFlow.GenOps.Core
                                matchingFilesTensorFlow.GenOps.Core
                                matchingFiles'TensorFlow.GenOps.Core
                                matMulTensorFlow.GenOps.Core
                                matMul'TensorFlow.GenOps.Core
                                matrixBandPartTensorFlow.GenOps.Core
                                matrixBandPart'TensorFlow.GenOps.Core
                                matrixDeterminantTensorFlow.GenOps.Core
                                matrixDeterminant'TensorFlow.GenOps.Core
                                matrixDiagTensorFlow.GenOps.Core
                                matrixDiag'TensorFlow.GenOps.Core
                                matrixDiagPartTensorFlow.GenOps.Core
                                matrixDiagPart'TensorFlow.GenOps.Core
                                matrixInverseTensorFlow.GenOps.Core
                                matrixInverse'TensorFlow.GenOps.Core
                                matrixSetDiagTensorFlow.GenOps.Core
                                matrixSetDiag'TensorFlow.GenOps.Core
                                matrixSolveTensorFlow.GenOps.Core
                                matrixSolve'TensorFlow.GenOps.Core
                                matrixSolveLsTensorFlow.GenOps.Core
                                matrixSolveLs'TensorFlow.GenOps.Core
                                matrixTriangularSolveTensorFlow.GenOps.Core
                                matrixTriangularSolve'TensorFlow.GenOps.Core
                                maxTensorFlow.GenOps.Core
                                max'TensorFlow.GenOps.Core
                                maximumTensorFlow.GenOps.Core
                                maximum'TensorFlow.GenOps.Core
                                maxPoolTensorFlow.GenOps.Core
                                maxPool'TensorFlow.GenOps.Core
                                maxPool3DTensorFlow.GenOps.Core
                                maxPool3D'TensorFlow.GenOps.Core
                                maxPool3DGradTensorFlow.GenOps.Core
                                maxPool3DGrad'TensorFlow.GenOps.Core
                                maxPoolGradTensorFlow.GenOps.Core
                                maxPoolGrad'TensorFlow.GenOps.Core
                                maxPoolGradWithArgmaxTensorFlow.GenOps.Core
                                maxPoolGradWithArgmax'TensorFlow.GenOps.Core
                                maxPoolWithArgmaxTensorFlow.GenOps.Core
                                maxPoolWithArgmax'TensorFlow.GenOps.Core
                                meanTensorFlow.GenOps.Core
                                mean'TensorFlow.GenOps.Core
                                mergeTensorFlow.GenOps.Core
                                merge'TensorFlow.GenOps.Core
                                mergeSummaryTensorFlow.GenOps.Core
                                mergeSummary'TensorFlow.GenOps.Core
                                mergeV2CheckpointsTensorFlow.GenOps.Core
                                mergeV2Checkpoints'TensorFlow.GenOps.Core
                                minTensorFlow.GenOps.Core
                                min'TensorFlow.GenOps.Core
                                minimumTensorFlow.GenOps.Core
                                minimum'TensorFlow.GenOps.Core
                                mirrorPadTensorFlow.GenOps.Core
                                mirrorPad'TensorFlow.GenOps.Core
                                mirrorPadGradTensorFlow.GenOps.Core
                                mirrorPadGrad'TensorFlow.GenOps.Core
                                modTensorFlow.GenOps.Core
                                mod'TensorFlow.GenOps.Core
                                mulTensorFlow.GenOps.Core
                                mul'TensorFlow.GenOps.Core
                                multinomialTensorFlow.GenOps.Core
                                multinomial'TensorFlow.GenOps.Core
                                mutableDenseHashTableTensorFlow.GenOps.Core
                                mutableDenseHashTable'TensorFlow.GenOps.Core
                                mutableHashTableTensorFlow.GenOps.Core
                                mutableHashTable'TensorFlow.GenOps.Core
                                mutableHashTableOfTensorsTensorFlow.GenOps.Core
                                mutableHashTableOfTensors'TensorFlow.GenOps.Core
                                negTensorFlow.GenOps.Core
                                neg'TensorFlow.GenOps.Core
                                negTrainTensorFlow.GenOps.Core
                                negTrain'TensorFlow.GenOps.Core
                                nextIterationTensorFlow.GenOps.Core
                                nextIteration'TensorFlow.GenOps.Core
                                nonMaxSuppressionTensorFlow.GenOps.Core
                                nonMaxSuppression'TensorFlow.GenOps.Core
                                noOpTensorFlow.GenOps.Core
                                noOp'TensorFlow.GenOps.Core
                                notEqualTensorFlow.GenOps.Core
                                notEqual'TensorFlow.GenOps.Core
                                oneHotTensorFlow.GenOps.Core
                                oneHot'TensorFlow.GenOps.Core
                                packTensorFlow.GenOps.Core
                                pack'TensorFlow.GenOps.Core
                                padTensorFlow.GenOps.Core
                                pad'TensorFlow.GenOps.Core
                                paddingFIFOQueueTensorFlow.GenOps.Core
                                paddingFIFOQueue'TensorFlow.GenOps.Core
                                paddingFIFOQueueV2TensorFlow.GenOps.Core
                                paddingFIFOQueueV2'TensorFlow.GenOps.Core
                                parallelConcatTensorFlow.GenOps.Core
                                parallelConcat'TensorFlow.GenOps.Core
                                parameterizedTruncatedNormalTensorFlow.GenOps.Core
                                parameterizedTruncatedNormal'TensorFlow.GenOps.Core
                                parseExampleTensorFlow.GenOps.Core
                                parseExample'TensorFlow.GenOps.Core
                                parseSingleSequenceExampleTensorFlow.GenOps.Core
                                parseSingleSequenceExample'TensorFlow.GenOps.Core
                                parseTensorTensorFlow.GenOps.Core
                                parseTensor'TensorFlow.GenOps.Core
                                placeholderTensorFlow.GenOps.Core
                                placeholder'TensorFlow.GenOps.Core
                                placeholderV2TensorFlow.GenOps.Core
                                placeholderV2'TensorFlow.GenOps.Core
                                placeholderWithDefaultTensorFlow.GenOps.Core
                                placeholderWithDefault'TensorFlow.GenOps.Core
                                polygammaTensorFlow.GenOps.Core
                                polygamma'TensorFlow.GenOps.Core
                                powTensorFlow.GenOps.Core
                                pow'TensorFlow.GenOps.Core
                                preventGradientTensorFlow.GenOps.Core
                                preventGradient'TensorFlow.GenOps.Core
                                printTensorFlow.GenOps.Core
                                print'TensorFlow.GenOps.Core
                                priorityQueueTensorFlow.GenOps.Core
                                priorityQueue'TensorFlow.GenOps.Core
                                priorityQueueV2TensorFlow.GenOps.Core
                                priorityQueueV2'TensorFlow.GenOps.Core
                                prodTensorFlow.GenOps.Core
                                prod'TensorFlow.GenOps.Core
                                qrTensorFlow.GenOps.Core
                                qr'TensorFlow.GenOps.Core
                                quantizeAndDequantizeTensorFlow.GenOps.Core
                                quantizeAndDequantize'TensorFlow.GenOps.Core
                                quantizedAvgPoolTensorFlow.GenOps.Core
                                quantizedAvgPool'TensorFlow.GenOps.Core
                                quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
                                quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
                                quantizedBiasAddTensorFlow.GenOps.Core
                                quantizedBiasAdd'TensorFlow.GenOps.Core
                                quantizedConcatTensorFlow.GenOps.Core
                                quantizedConcat'TensorFlow.GenOps.Core
                                quantizedConv2DTensorFlow.GenOps.Core
                                quantizedConv2D'TensorFlow.GenOps.Core
                                quantizedInstanceNormTensorFlow.GenOps.Core
                                quantizedInstanceNorm'TensorFlow.GenOps.Core
                                quantizedMatMulTensorFlow.GenOps.Core
                                quantizedMatMul'TensorFlow.GenOps.Core
                                quantizedMaxPoolTensorFlow.GenOps.Core
                                quantizedMaxPool'TensorFlow.GenOps.Core
                                quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
                                quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
                                quantizedReluTensorFlow.GenOps.Core
                                quantizedRelu'TensorFlow.GenOps.Core
                                quantizedRelu6TensorFlow.GenOps.Core
                                quantizedRelu6'TensorFlow.GenOps.Core
                                quantizedReluXTensorFlow.GenOps.Core
                                quantizedReluX'TensorFlow.GenOps.Core
                                quantizedReshapeTensorFlow.GenOps.Core
                                quantizedReshape'TensorFlow.GenOps.Core
                                quantizeV2TensorFlow.GenOps.Core
                                quantizeV2'TensorFlow.GenOps.Core
                                queueCloseTensorFlow.GenOps.Core
                                queueClose'TensorFlow.GenOps.Core
                                queueCloseV2TensorFlow.GenOps.Core
                                queueCloseV2'TensorFlow.GenOps.Core
                                queueDequeueTensorFlow.GenOps.Core
                                queueDequeue'TensorFlow.GenOps.Core
                                queueDequeueManyTensorFlow.GenOps.Core
                                queueDequeueMany'TensorFlow.GenOps.Core
                                queueDequeueManyV2TensorFlow.GenOps.Core
                                queueDequeueManyV2'TensorFlow.GenOps.Core
                                queueDequeueUpToTensorFlow.GenOps.Core
                                queueDequeueUpTo'TensorFlow.GenOps.Core
                                queueDequeueUpToV2TensorFlow.GenOps.Core
                                queueDequeueUpToV2'TensorFlow.GenOps.Core
                                queueDequeueV2TensorFlow.GenOps.Core
                                queueDequeueV2'TensorFlow.GenOps.Core
                                queueEnqueueTensorFlow.GenOps.Core
                                queueEnqueue'TensorFlow.GenOps.Core
                                queueEnqueueManyTensorFlow.GenOps.Core
                                queueEnqueueMany'TensorFlow.GenOps.Core
                                queueEnqueueManyV2TensorFlow.GenOps.Core
                                queueEnqueueManyV2'TensorFlow.GenOps.Core
                                queueEnqueueV2TensorFlow.GenOps.Core
                                queueEnqueueV2'TensorFlow.GenOps.Core
                                queueSizeTensorFlow.GenOps.Core
                                queueSize'TensorFlow.GenOps.Core
                                queueSizeV2TensorFlow.GenOps.Core
                                queueSizeV2'TensorFlow.GenOps.Core
                                randomCropTensorFlow.GenOps.Core
                                randomCrop'TensorFlow.GenOps.Core
                                randomGammaTensorFlow.GenOps.Core
                                randomGamma'TensorFlow.GenOps.Core
                                randomShuffleTensorFlow.GenOps.Core
                                randomShuffle'TensorFlow.GenOps.Core
                                randomShuffleQueueTensorFlow.GenOps.Core
                                randomShuffleQueue'TensorFlow.GenOps.Core
                                randomShuffleQueueV2TensorFlow.GenOps.Core
                                randomShuffleQueueV2'TensorFlow.GenOps.Core
                                randomStandardNormalTensorFlow.GenOps.Core
                                randomStandardNormal'TensorFlow.GenOps.Core
                                randomUniformTensorFlow.GenOps.Core
                                randomUniform'TensorFlow.GenOps.Core
                                randomUniformIntTensorFlow.GenOps.Core
                                randomUniformInt'TensorFlow.GenOps.Core
                                rangeTensorFlow.GenOps.Core
                                range'TensorFlow.GenOps.Core
                                rankTensorFlow.GenOps.Core
                                rank'TensorFlow.GenOps.Core
                                readerNumRecordsProducedTensorFlow.GenOps.Core
                                readerNumRecordsProduced'TensorFlow.GenOps.Core
                                readerNumRecordsProducedV2TensorFlow.GenOps.Core
                                readerNumRecordsProducedV2'TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
                                readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
                                readerReadTensorFlow.GenOps.Core
                                readerRead'TensorFlow.GenOps.Core
                                readerReadUpToTensorFlow.GenOps.Core
                                readerReadUpTo'TensorFlow.GenOps.Core
                                readerReadUpToV2TensorFlow.GenOps.Core
                                readerReadUpToV2'TensorFlow.GenOps.Core
                                readerReadV2TensorFlow.GenOps.Core
                                readerReadV2'TensorFlow.GenOps.Core
                                readerResetTensorFlow.GenOps.Core
                                readerReset'TensorFlow.GenOps.Core
                                readerResetV2TensorFlow.GenOps.Core
                                readerResetV2'TensorFlow.GenOps.Core
                                readerRestoreStateTensorFlow.GenOps.Core
                                readerRestoreState'TensorFlow.GenOps.Core
                                readerRestoreStateV2TensorFlow.GenOps.Core
                                readerRestoreStateV2'TensorFlow.GenOps.Core
                                readerSerializeStateTensorFlow.GenOps.Core
                                readerSerializeState'TensorFlow.GenOps.Core
                                readerSerializeStateV2TensorFlow.GenOps.Core
                                readerSerializeStateV2'TensorFlow.GenOps.Core
                                readFileTensorFlow.GenOps.Core
                                readFile'TensorFlow.GenOps.Core
                                readVariableOpTensorFlow.GenOps.Core
                                readVariableOp'TensorFlow.GenOps.Core
                                realTensorFlow.GenOps.Core
                                real'TensorFlow.GenOps.Core
                                realDivTensorFlow.GenOps.Core
                                realDiv'TensorFlow.GenOps.Core
                                reciprocalTensorFlow.GenOps.Core
                                reciprocal'TensorFlow.GenOps.Core
                                reciprocalGradTensorFlow.GenOps.Core
                                reciprocalGrad'TensorFlow.GenOps.Core
                                recordInputTensorFlow.GenOps.Core
                                recordInput'TensorFlow.GenOps.Core
                                reduceJoinTensorFlow.GenOps.Core
                                reduceJoin'TensorFlow.GenOps.Core
                                refEnterTensorFlow.GenOps.Core
                                refEnter'TensorFlow.GenOps.Core
                                refExitTensorFlow.GenOps.Core
                                refExit'TensorFlow.GenOps.Core
                                refIdentityTensorFlow.GenOps.Core
                                refIdentity'TensorFlow.GenOps.Core
                                refMergeTensorFlow.GenOps.Core
                                refMerge'TensorFlow.GenOps.Core
                                refNextIterationTensorFlow.GenOps.Core
                                refNextIteration'TensorFlow.GenOps.Core
                                refSelectTensorFlow.GenOps.Core
                                refSelect'TensorFlow.GenOps.Core
                                refSwitchTensorFlow.GenOps.Core
                                refSwitch'TensorFlow.GenOps.Core
                                reluTensorFlow.GenOps.Core
                                relu'TensorFlow.GenOps.Core
                                relu6TensorFlow.GenOps.Core
                                relu6'TensorFlow.GenOps.Core
                                relu6GradTensorFlow.GenOps.Core
                                relu6Grad'TensorFlow.GenOps.Core
                                reluGradTensorFlow.GenOps.Core
                                reluGrad'TensorFlow.GenOps.Core
                                requantizationRangeTensorFlow.GenOps.Core
                                requantizationRange'TensorFlow.GenOps.Core
                                requantizeTensorFlow.GenOps.Core
                                requantize'TensorFlow.GenOps.Core
                                reshapeTensorFlow.GenOps.Core
                                reshape'TensorFlow.GenOps.Core
                                resizeAreaTensorFlow.GenOps.Core
                                resizeArea'TensorFlow.GenOps.Core
                                resizeBicubicTensorFlow.GenOps.Core
                                resizeBicubic'TensorFlow.GenOps.Core
                                resizeBilinearTensorFlow.GenOps.Core
                                resizeBilinear'TensorFlow.GenOps.Core
                                resizeBilinearGradTensorFlow.GenOps.Core
                                resizeBilinearGrad'TensorFlow.GenOps.Core
                                resizeNearestNeighborTensorFlow.GenOps.Core
                                resizeNearestNeighbor'TensorFlow.GenOps.Core
                                resizeNearestNeighborGradTensorFlow.GenOps.Core
                                resizeNearestNeighborGrad'TensorFlow.GenOps.Core
                                resourceApplyAdadeltaTensorFlow.GenOps.Core
                                resourceApplyAdadelta'TensorFlow.GenOps.Core
                                resourceApplyAdagradTensorFlow.GenOps.Core
                                resourceApplyAdagrad'TensorFlow.GenOps.Core
                                resourceApplyAdagradDATensorFlow.GenOps.Core
                                resourceApplyAdagradDA'TensorFlow.GenOps.Core
                                resourceApplyAdamTensorFlow.GenOps.Core
                                resourceApplyAdam'TensorFlow.GenOps.Core
                                resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
                                resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                resourceApplyFtrlTensorFlow.GenOps.Core
                                resourceApplyFtrl'TensorFlow.GenOps.Core
                                resourceApplyGradientDescentTensorFlow.GenOps.Core
                                resourceApplyGradientDescent'TensorFlow.GenOps.Core
                                resourceApplyMomentumTensorFlow.GenOps.Core
                                resourceApplyMomentum'TensorFlow.GenOps.Core
                                resourceApplyProximalAdagradTensorFlow.GenOps.Core
                                resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
                                resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
                                resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                resourceApplyRMSPropTensorFlow.GenOps.Core
                                resourceApplyRMSProp'TensorFlow.GenOps.Core
                                resourceGatherTensorFlow.GenOps.Core
                                resourceGather'TensorFlow.GenOps.Core
                                resourceScatterAddTensorFlow.GenOps.Core
                                resourceScatterAdd'TensorFlow.GenOps.Core
                                resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
                                resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
                                resourceSparseApplyAdagradTensorFlow.GenOps.Core
                                resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
                                resourceSparseApplyAdagradDATensorFlow.GenOps.Core
                                resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
                                resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
                                resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                resourceSparseApplyFtrlTensorFlow.GenOps.Core
                                resourceSparseApplyFtrl'TensorFlow.GenOps.Core
                                resourceSparseApplyMomentumTensorFlow.GenOps.Core
                                resourceSparseApplyMomentum'TensorFlow.GenOps.Core
                                resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
                                resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
                                resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
                                resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                resourceSparseApplyRMSPropTensorFlow.GenOps.Core
                                resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
                                restoreTensorFlow.GenOps.Core
                                restore'TensorFlow.GenOps.Core
                                restoreSliceTensorFlow.GenOps.Core
                                restoreSlice'TensorFlow.GenOps.Core
                                restoreV2TensorFlow.GenOps.Core
                                restoreV2'TensorFlow.GenOps.Core
                                reverseTensorFlow.GenOps.Core
                                reverse'TensorFlow.GenOps.Core
                                reverseSequenceTensorFlow.GenOps.Core
                                reverseSequence'TensorFlow.GenOps.Core
                                reverseV2TensorFlow.GenOps.Core
                                reverseV2'TensorFlow.GenOps.Core
                                rGBToHSVTensorFlow.GenOps.Core
                                rGBToHSV'TensorFlow.GenOps.Core
                                rintTensorFlow.GenOps.Core
                                rint'TensorFlow.GenOps.Core
                                roundTensorFlow.GenOps.Core
                                round'TensorFlow.GenOps.Core
                                rsqrtTensorFlow.GenOps.Core
                                rsqrt'TensorFlow.GenOps.Core
                                rsqrtGradTensorFlow.GenOps.Core
                                rsqrtGrad'TensorFlow.GenOps.Core
                                sampleDistortedBoundingBoxTensorFlow.GenOps.Core
                                sampleDistortedBoundingBox'TensorFlow.GenOps.Core
                                saveTensorFlow.GenOps.Core
                                save'TensorFlow.GenOps.Core
                                saveSlicesTensorFlow.GenOps.Core
                                saveSlices'TensorFlow.GenOps.Core
                                saveV2TensorFlow.GenOps.Core
                                saveV2'TensorFlow.GenOps.Core
                                scalarSummaryTensorFlow.GenOps.Core
                                scalarSummary'TensorFlow.GenOps.Core
                                scatterAddTensorFlow.GenOps.Core
                                scatterAdd'TensorFlow.GenOps.Core
                                scatterDivTensorFlow.GenOps.Core
                                scatterDiv'TensorFlow.GenOps.Core
                                scatterMulTensorFlow.GenOps.Core
                                scatterMul'TensorFlow.GenOps.Core
                                scatterNdTensorFlow.GenOps.Core
                                scatterNd'TensorFlow.GenOps.Core
                                scatterNdAddTensorFlow.GenOps.Core
                                scatterNdAdd'TensorFlow.GenOps.Core
                                scatterNdSubTensorFlow.GenOps.Core
                                scatterNdSub'TensorFlow.GenOps.Core
                                scatterNdUpdateTensorFlow.GenOps.Core
                                scatterNdUpdate'TensorFlow.GenOps.Core
                                scatterSubTensorFlow.GenOps.Core
                                scatterSub'TensorFlow.GenOps.Core
                                scatterUpdateTensorFlow.GenOps.Core
                                scatterUpdate'TensorFlow.GenOps.Core
                                sdcaFprintTensorFlow.GenOps.Core
                                sdcaFprint'TensorFlow.GenOps.Core
                                sdcaOptimizerTensorFlow.GenOps.Core
                                sdcaOptimizer'TensorFlow.GenOps.Core
                                sdcaShrinkL1TensorFlow.GenOps.Core
                                sdcaShrinkL1'TensorFlow.GenOps.Core
                                segmentMaxTensorFlow.GenOps.Core
                                segmentMax'TensorFlow.GenOps.Core
                                segmentMeanTensorFlow.GenOps.Core
                                segmentMean'TensorFlow.GenOps.Core
                                segmentMinTensorFlow.GenOps.Core
                                segmentMin'TensorFlow.GenOps.Core
                                segmentProdTensorFlow.GenOps.Core
                                segmentProd'TensorFlow.GenOps.Core
                                segmentSumTensorFlow.GenOps.Core
                                segmentSum'TensorFlow.GenOps.Core
                                selectTensorFlow.GenOps.Core
                                select'TensorFlow.GenOps.Core
                                selfAdjointEigTensorFlow.GenOps.Core
                                selfAdjointEig'TensorFlow.GenOps.Core
                                selfAdjointEigV2TensorFlow.GenOps.Core
                                selfAdjointEigV2'TensorFlow.GenOps.Core
                                serializeManySparseTensorFlow.GenOps.Core
                                serializeManySparse'TensorFlow.GenOps.Core
                                serializeSparseTensorFlow.GenOps.Core
                                serializeSparse'TensorFlow.GenOps.Core
                                setSizeTensorFlow.GenOps.Core
                                setSize'TensorFlow.GenOps.Core
                                shapeTensorFlow.GenOps.Core
                                shape'TensorFlow.GenOps.Core
                                shapeNTensorFlow.GenOps.Core
                                shapeN'TensorFlow.GenOps.Core
                                shardedFilenameTensorFlow.GenOps.Core
                                shardedFilename'TensorFlow.GenOps.Core
                                shardedFilespecTensorFlow.GenOps.Core
                                shardedFilespec'TensorFlow.GenOps.Core
                                sigmoidTensorFlow.GenOps.Core
                                sigmoid'TensorFlow.GenOps.Core
                                sigmoidGradTensorFlow.GenOps.Core
                                sigmoidGrad'TensorFlow.GenOps.Core
                                signTensorFlow.GenOps.Core
                                sign'TensorFlow.GenOps.Core
                                sinTensorFlow.GenOps.Core
                                sin'TensorFlow.GenOps.Core
                                sizeTensorFlow.GenOps.Core
                                size'TensorFlow.GenOps.Core
                                skipgramTensorFlow.GenOps.Core
                                skipgram'TensorFlow.GenOps.Core
                                sliceTensorFlow.GenOps.Core
                                slice'TensorFlow.GenOps.Core
                                softmaxTensorFlow.GenOps.Core
                                softmax'TensorFlow.GenOps.Core
                                softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
                                softmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
                                softplusTensorFlow.GenOps.Core
                                softplus'TensorFlow.GenOps.Core
                                softplusGradTensorFlow.GenOps.Core
                                softplusGrad'TensorFlow.GenOps.Core
                                softsignTensorFlow.GenOps.Core
                                softsign'TensorFlow.GenOps.Core
                                softsignGradTensorFlow.GenOps.Core
                                softsignGrad'TensorFlow.GenOps.Core
                                spaceToBatchTensorFlow.GenOps.Core
                                spaceToBatch'TensorFlow.GenOps.Core
                                spaceToBatchNDTensorFlow.GenOps.Core
                                spaceToBatchND'TensorFlow.GenOps.Core
                                spaceToDepthTensorFlow.GenOps.Core
                                spaceToDepth'TensorFlow.GenOps.Core
                                sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
                                sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
                                sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
                                sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
                                sparseAddTensorFlow.GenOps.Core
                                sparseAdd'TensorFlow.GenOps.Core
                                sparseAddGradTensorFlow.GenOps.Core
                                sparseAddGrad'TensorFlow.GenOps.Core
                                sparseApplyAdadeltaTensorFlow.GenOps.Core
                                sparseApplyAdadelta'TensorFlow.GenOps.Core
                                sparseApplyAdagradTensorFlow.GenOps.Core
                                sparseApplyAdagrad'TensorFlow.GenOps.Core
                                sparseApplyAdagradDATensorFlow.GenOps.Core
                                sparseApplyAdagradDA'TensorFlow.GenOps.Core
                                sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
                                sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                sparseApplyFtrlTensorFlow.GenOps.Core
                                sparseApplyFtrl'TensorFlow.GenOps.Core
                                sparseApplyMomentumTensorFlow.GenOps.Core
                                sparseApplyMomentum'TensorFlow.GenOps.Core
                                sparseApplyProximalAdagradTensorFlow.GenOps.Core
                                sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
                                sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
                                sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                sparseApplyRMSPropTensorFlow.GenOps.Core
                                sparseApplyRMSProp'TensorFlow.GenOps.Core
                                sparseConcatTensorFlow.GenOps.Core
                                sparseConcat'TensorFlow.GenOps.Core
                                sparseConditionalAccumulatorTensorFlow.GenOps.Core
                                sparseConditionalAccumulator'TensorFlow.GenOps.Core
                                sparseDenseCwiseAddTensorFlow.GenOps.Core
                                sparseDenseCwiseAdd'TensorFlow.GenOps.Core
                                sparseDenseCwiseDivTensorFlow.GenOps.Core
                                sparseDenseCwiseDiv'TensorFlow.GenOps.Core
                                sparseDenseCwiseMulTensorFlow.GenOps.Core
                                sparseDenseCwiseMul'TensorFlow.GenOps.Core
                                sparseMatMulTensorFlow.GenOps.Core
                                sparseMatMul'TensorFlow.GenOps.Core
                                sparseReduceSumTensorFlow.GenOps.Core
                                sparseReduceSum'TensorFlow.GenOps.Core
                                sparseReduceSumSparseTensorFlow.GenOps.Core
                                sparseReduceSumSparse'TensorFlow.GenOps.Core
                                sparseReorderTensorFlow.GenOps.Core
                                sparseReorder'TensorFlow.GenOps.Core
                                sparseReshapeTensorFlow.GenOps.Core
                                sparseReshape'TensorFlow.GenOps.Core
                                sparseSegmentMeanTensorFlow.GenOps.Core
                                sparseSegmentMean'TensorFlow.GenOps.Core
                                sparseSegmentMeanGradTensorFlow.GenOps.Core
                                sparseSegmentMeanGrad'TensorFlow.GenOps.Core
                                sparseSegmentSqrtNTensorFlow.GenOps.Core
                                sparseSegmentSqrtN'TensorFlow.GenOps.Core
                                sparseSegmentSqrtNGradTensorFlow.GenOps.Core
                                sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
                                sparseSegmentSumTensorFlow.GenOps.Core
                                sparseSegmentSum'TensorFlow.GenOps.Core
                                sparseSoftmaxTensorFlow.GenOps.Core
                                sparseSoftmax'TensorFlow.GenOps.Core
                                sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
                                sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
                                sparseSparseMaximumTensorFlow.GenOps.Core
                                sparseSparseMaximum'TensorFlow.GenOps.Core
                                sparseSparseMinimumTensorFlow.GenOps.Core
                                sparseSparseMinimum'TensorFlow.GenOps.Core
                                sparseSplitTensorFlow.GenOps.Core
                                sparseSplit'TensorFlow.GenOps.Core
                                sparseTensorDenseAddTensorFlow.GenOps.Core
                                sparseTensorDenseAdd'TensorFlow.GenOps.Core
                                sparseTensorDenseMatMulTensorFlow.GenOps.Core
                                sparseTensorDenseMatMul'TensorFlow.GenOps.Core
                                sparseToDenseTensorFlow.GenOps.Core
                                sparseToDense'TensorFlow.GenOps.Core
                                sparseToSparseSetOperationTensorFlow.GenOps.Core
                                sparseToSparseSetOperation'TensorFlow.GenOps.Core
                                splitTensorFlow.GenOps.Core
                                split'TensorFlow.GenOps.Core
                                splitVTensorFlow.GenOps.Core
                                splitV'TensorFlow.GenOps.Core
                                sqrtTensorFlow.GenOps.Core
                                sqrt'TensorFlow.GenOps.Core
                                sqrtGradTensorFlow.GenOps.Core
                                sqrtGrad'TensorFlow.GenOps.Core
                                squareTensorFlow.GenOps.Core
                                square'TensorFlow.GenOps.Core
                                squaredDifferenceTensorFlow.GenOps.Core
                                squaredDifference'TensorFlow.GenOps.Core
                                squeezeTensorFlow.GenOps.Core
                                squeeze'TensorFlow.GenOps.Core
                                stackTensorFlow.GenOps.Core
                                stack'TensorFlow.GenOps.Core
                                stackCloseTensorFlow.GenOps.Core
                                stackClose'TensorFlow.GenOps.Core
                                stackPopTensorFlow.GenOps.Core
                                stackPop'TensorFlow.GenOps.Core
                                stackPushTensorFlow.GenOps.Core
                                stackPush'TensorFlow.GenOps.Core
                                stageTensorFlow.GenOps.Core
                                stage'TensorFlow.GenOps.Core
                                stopGradientTensorFlow.GenOps.Core
                                stopGradient'TensorFlow.GenOps.Core
                                stridedSliceTensorFlow.GenOps.Core
                                stridedSlice'TensorFlow.GenOps.Core
                                stridedSliceAssignTensorFlow.GenOps.Core
                                stridedSliceAssign'TensorFlow.GenOps.Core
                                stridedSliceGradTensorFlow.GenOps.Core
                                stridedSliceGrad'TensorFlow.GenOps.Core
                                stringJoinTensorFlow.GenOps.Core
                                stringJoin'TensorFlow.GenOps.Core
                                stringSplitTensorFlow.GenOps.Core
                                stringSplit'TensorFlow.GenOps.Core
                                stringToHashBucketTensorFlow.GenOps.Core
                                stringToHashBucket'TensorFlow.GenOps.Core
                                stringToHashBucketFastTensorFlow.GenOps.Core
                                stringToHashBucketFast'TensorFlow.GenOps.Core
                                stringToHashBucketStrongTensorFlow.GenOps.Core
                                stringToHashBucketStrong'TensorFlow.GenOps.Core
                                stringToNumberTensorFlow.GenOps.Core
                                stringToNumber'TensorFlow.GenOps.Core
                                subTensorFlow.GenOps.Core
                                sub'TensorFlow.GenOps.Core
                                substrTensorFlow.GenOps.Core
                                substr'TensorFlow.GenOps.Core
                                sumTensorFlow.GenOps.Core
                                sum'TensorFlow.GenOps.Core
                                svdTensorFlow.GenOps.Core
                                svd'TensorFlow.GenOps.Core
                                switchTensorFlow.GenOps.Core
                                switch'TensorFlow.GenOps.Core
                                takeManySparseFromTensorsMapTensorFlow.GenOps.Core
                                takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
                                tanTensorFlow.GenOps.Core
                                tan'TensorFlow.GenOps.Core
                                tanhTensorFlow.GenOps.Core
                                tanh'TensorFlow.GenOps.Core
                                tanhGradTensorFlow.GenOps.Core
                                tanhGrad'TensorFlow.GenOps.Core
                                temporaryVariableTensorFlow.GenOps.Core
                                temporaryVariable'TensorFlow.GenOps.Core
                                tensorArrayTensorFlow.GenOps.Core
                                tensorArray'TensorFlow.GenOps.Core
                                tensorArrayCloseTensorFlow.GenOps.Core
                                tensorArrayClose'TensorFlow.GenOps.Core
                                tensorArrayCloseV2TensorFlow.GenOps.Core
                                tensorArrayCloseV2'TensorFlow.GenOps.Core
                                tensorArrayCloseV3TensorFlow.GenOps.Core
                                tensorArrayCloseV3'TensorFlow.GenOps.Core
                                tensorArrayConcatTensorFlow.GenOps.Core
                                tensorArrayConcat'TensorFlow.GenOps.Core
                                tensorArrayConcatV2TensorFlow.GenOps.Core
                                tensorArrayConcatV2'TensorFlow.GenOps.Core
                                tensorArrayConcatV3TensorFlow.GenOps.Core
                                tensorArrayConcatV3'TensorFlow.GenOps.Core
                                tensorArrayGatherTensorFlow.GenOps.Core
                                tensorArrayGather'TensorFlow.GenOps.Core
                                tensorArrayGatherV2TensorFlow.GenOps.Core
                                tensorArrayGatherV2'TensorFlow.GenOps.Core
                                tensorArrayGatherV3TensorFlow.GenOps.Core
                                tensorArrayGatherV3'TensorFlow.GenOps.Core
                                tensorArrayGradTensorFlow.GenOps.Core
                                tensorArrayGrad'TensorFlow.GenOps.Core
                                tensorArrayGradV2TensorFlow.GenOps.Core
                                tensorArrayGradV2'TensorFlow.GenOps.Core
                                tensorArrayGradV3TensorFlow.GenOps.Core
                                tensorArrayGradV3'TensorFlow.GenOps.Core
                                tensorArrayPackTensorFlow.GenOps.Core
                                tensorArrayPack'TensorFlow.GenOps.Core
                                tensorArrayReadTensorFlow.GenOps.Core
                                tensorArrayRead'TensorFlow.GenOps.Core
                                tensorArrayReadV2TensorFlow.GenOps.Core
                                tensorArrayReadV2'TensorFlow.GenOps.Core
                                tensorArrayReadV3TensorFlow.GenOps.Core
                                tensorArrayReadV3'TensorFlow.GenOps.Core
                                tensorArrayScatterTensorFlow.GenOps.Core
                                tensorArrayScatter'TensorFlow.GenOps.Core
                                tensorArrayScatterV2TensorFlow.GenOps.Core
                                tensorArrayScatterV2'TensorFlow.GenOps.Core
                                tensorArrayScatterV3TensorFlow.GenOps.Core
                                tensorArrayScatterV3'TensorFlow.GenOps.Core
                                tensorArraySizeTensorFlow.GenOps.Core
                                tensorArraySize'TensorFlow.GenOps.Core
                                tensorArraySizeV2TensorFlow.GenOps.Core
                                tensorArraySizeV2'TensorFlow.GenOps.Core
                                tensorArraySizeV3TensorFlow.GenOps.Core
                                tensorArraySizeV3'TensorFlow.GenOps.Core
                                tensorArraySplitTensorFlow.GenOps.Core
                                tensorArraySplit'TensorFlow.GenOps.Core
                                tensorArraySplitV2TensorFlow.GenOps.Core
                                tensorArraySplitV2'TensorFlow.GenOps.Core
                                tensorArraySplitV3TensorFlow.GenOps.Core
                                tensorArraySplitV3'TensorFlow.GenOps.Core
                                tensorArrayUnpackTensorFlow.GenOps.Core
                                tensorArrayUnpack'TensorFlow.GenOps.Core
                                tensorArrayV2TensorFlow.GenOps.Core
                                tensorArrayV2'TensorFlow.GenOps.Core
                                tensorArrayV3TensorFlow.GenOps.Core
                                tensorArrayV3'TensorFlow.GenOps.Core
                                tensorArrayWriteTensorFlow.GenOps.Core
                                tensorArrayWrite'TensorFlow.GenOps.Core
                                tensorArrayWriteV2TensorFlow.GenOps.Core
                                tensorArrayWriteV2'TensorFlow.GenOps.Core
                                tensorArrayWriteV3TensorFlow.GenOps.Core
                                tensorArrayWriteV3'TensorFlow.GenOps.Core
                                tensorSummaryTensorFlow.GenOps.Core
                                tensorSummary'TensorFlow.GenOps.Core
                                textLineReaderTensorFlow.GenOps.Core
                                textLineReader'TensorFlow.GenOps.Core
                                textLineReaderV2TensorFlow.GenOps.Core
                                textLineReaderV2'TensorFlow.GenOps.Core
                                tFRecordReaderTensorFlow.GenOps.Core
                                tFRecordReader'TensorFlow.GenOps.Core
                                tFRecordReaderV2TensorFlow.GenOps.Core
                                tFRecordReaderV2'TensorFlow.GenOps.Core
                                threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
                                threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
                                tileTensorFlow.GenOps.Core
                                tile'TensorFlow.GenOps.Core
                                tileGradTensorFlow.GenOps.Core
                                tileGrad'TensorFlow.GenOps.Core
                                topKTensorFlow.GenOps.Core
                                topK'TensorFlow.GenOps.Core
                                topKV2TensorFlow.GenOps.Core
                                topKV2'TensorFlow.GenOps.Core
                                transposeTensorFlow.GenOps.Core
                                transpose'TensorFlow.GenOps.Core
                                truncateDivTensorFlow.GenOps.Core
                                truncateDiv'TensorFlow.GenOps.Core
                                truncatedNormalTensorFlow.GenOps.Core
                                truncatedNormal'TensorFlow.GenOps.Core
                                truncateModTensorFlow.GenOps.Core
                                truncateMod'TensorFlow.GenOps.Core
                                uniformCandidateSamplerTensorFlow.GenOps.Core
                                uniformCandidateSampler'TensorFlow.GenOps.Core
                                uniqueTensorFlow.GenOps.Core
                                unique'TensorFlow.GenOps.Core
                                uniqueWithCountsTensorFlow.GenOps.Core
                                uniqueWithCounts'TensorFlow.GenOps.Core
                                unpackTensorFlow.GenOps.Core
                                unpack'TensorFlow.GenOps.Core
                                unsortedSegmentSumTensorFlow.GenOps.Core
                                unsortedSegmentSum'TensorFlow.GenOps.Core
                                unstageTensorFlow.GenOps.Core
                                unstage'TensorFlow.GenOps.Core
                                varHandleOpTensorFlow.GenOps.Core
                                varHandleOp'TensorFlow.GenOps.Core
                                variableTensorFlow.GenOps.Core
                                variable'TensorFlow.GenOps.Core
                                variableV2TensorFlow.GenOps.Core
                                variableV2'TensorFlow.GenOps.Core
                                varIsInitializedOpTensorFlow.GenOps.Core
                                varIsInitializedOp'TensorFlow.GenOps.Core
                                where'TensorFlow.GenOps.Core
                                where''TensorFlow.GenOps.Core
                                wholeFileReaderTensorFlow.GenOps.Core
                                wholeFileReader'TensorFlow.GenOps.Core
                                wholeFileReaderV2TensorFlow.GenOps.Core
                                wholeFileReaderV2'TensorFlow.GenOps.Core
                                writeFileTensorFlow.GenOps.Core
                                writeFile'TensorFlow.GenOps.Core
                                zerosLikeTensorFlow.GenOps.Core
                                zerosLike'TensorFlow.GenOps.Core
                                zetaTensorFlow.GenOps.Core
                                zeta'TensorFlow.GenOps.Core
                                _ArgTensorFlow.GenOps.Core
                                _Arg'TensorFlow.GenOps.Core
                                _ArrayToListTensorFlow.GenOps.Core
                                _ArrayToList'TensorFlow.GenOps.Core
                                _HostCastTensorFlow.GenOps.Core
                                _HostCast'TensorFlow.GenOps.Core
                                _HostRecvTensorFlow.GenOps.Core
                                _HostRecv'TensorFlow.GenOps.Core
                                _HostSendTensorFlow.GenOps.Core
                                _HostSend'TensorFlow.GenOps.Core
                                _ListToArrayTensorFlow.GenOps.Core
                                _ListToArray'TensorFlow.GenOps.Core
                                _ParallelConcatStartTensorFlow.GenOps.Core
                                _ParallelConcatStart'TensorFlow.GenOps.Core
                                _ParallelConcatUpdateTensorFlow.GenOps.Core
                                _ParallelConcatUpdate'TensorFlow.GenOps.Core
                                _RecvTensorFlow.GenOps.Core
                                _Recv'TensorFlow.GenOps.Core
                                _RetvalTensorFlow.GenOps.Core
                                _Retval'TensorFlow.GenOps.Core
                                _SendTensorFlow.GenOps.Core
                                _Send'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index

                                abortTensorFlow.GenOps.Core
                                abort'TensorFlow.GenOps.Core
                                absTensorFlow.GenOps.Core
                                abs'TensorFlow.GenOps.Core
                                accumulatorApplyGradientTensorFlow.GenOps.Core
                                accumulatorApplyGradient'TensorFlow.GenOps.Core
                                accumulatorNumAccumulatedTensorFlow.GenOps.Core
                                accumulatorNumAccumulated'TensorFlow.GenOps.Core
                                accumulatorSetGlobalStepTensorFlow.GenOps.Core
                                accumulatorSetGlobalStep'TensorFlow.GenOps.Core
                                accumulatorTakeGradientTensorFlow.GenOps.Core
                                accumulatorTakeGradient'TensorFlow.GenOps.Core
                                acosTensorFlow.GenOps.Core
                                acos'TensorFlow.GenOps.Core
                                acoshTensorFlow.GenOps.Core
                                acosh'TensorFlow.GenOps.Core
                                addTensorFlow.GenOps.Core
                                add'TensorFlow.GenOps.Core
                                addManySparseToTensorsMapTensorFlow.GenOps.Core
                                addManySparseToTensorsMap'TensorFlow.GenOps.Core
                                addNTensorFlow.GenOps.Core
                                addN'TensorFlow.GenOps.Core
                                addSparseToTensorsMapTensorFlow.GenOps.Core
                                addSparseToTensorsMap'TensorFlow.GenOps.Core
                                adjustContrastTensorFlow.GenOps.Core
                                adjustContrast'TensorFlow.GenOps.Core
                                adjustContrastv2TensorFlow.GenOps.Core
                                adjustContrastv2'TensorFlow.GenOps.Core
                                adjustHueTensorFlow.GenOps.Core
                                adjustHue'TensorFlow.GenOps.Core
                                adjustSaturationTensorFlow.GenOps.Core
                                adjustSaturation'TensorFlow.GenOps.Core
                                allTensorFlow.GenOps.Core
                                all'TensorFlow.GenOps.Core
                                allCandidateSamplerTensorFlow.GenOps.Core
                                allCandidateSampler'TensorFlow.GenOps.Core
                                anyTensorFlow.GenOps.Core
                                any'TensorFlow.GenOps.Core
                                applyAdadeltaTensorFlow.GenOps.Core
                                applyAdadelta'TensorFlow.GenOps.Core
                                applyAdagradTensorFlow.GenOps.Core
                                applyAdagrad'TensorFlow.GenOps.Core
                                applyAdagradDATensorFlow.GenOps.Core
                                applyAdagradDA'TensorFlow.GenOps.Core
                                applyAdamTensorFlow.GenOps.Core
                                applyAdam'TensorFlow.GenOps.Core
                                applyCenteredRMSPropTensorFlow.GenOps.Core
                                applyCenteredRMSProp'TensorFlow.GenOps.Core
                                applyDelayCompensatedGradientDescentTensorFlow.GenOps.Core
                                applyDelayCompensatedGradientDescent'TensorFlow.GenOps.Core
                                applyFtrlTensorFlow.GenOps.Core
                                applyFtrl'TensorFlow.GenOps.Core
                                applyFtrlV2TensorFlow.GenOps.Core
                                applyFtrlV2'TensorFlow.GenOps.Core
                                applyGradientDescentTensorFlow.GenOps.Core
                                applyGradientDescent'TensorFlow.GenOps.Core
                                applyMomentumTensorFlow.GenOps.Core
                                applyMomentum'TensorFlow.GenOps.Core
                                applyProximalAdagradTensorFlow.GenOps.Core
                                applyProximalAdagrad'TensorFlow.GenOps.Core
                                applyProximalGradientDescentTensorFlow.GenOps.Core
                                applyProximalGradientDescent'TensorFlow.GenOps.Core
                                applyRMSPropTensorFlow.GenOps.Core
                                applyRMSProp'TensorFlow.GenOps.Core
                                approximateEqualTensorFlow.GenOps.Core
                                approximateEqual'TensorFlow.GenOps.Core
                                argMaxTensorFlow.GenOps.Core
                                argMax'TensorFlow.GenOps.Core
                                argMinTensorFlow.GenOps.Core
                                argMin'TensorFlow.GenOps.Core
                                asinTensorFlow.GenOps.Core
                                asin'TensorFlow.GenOps.Core
                                asinhTensorFlow.GenOps.Core
                                asinh'TensorFlow.GenOps.Core
                                assertTensorFlow.GenOps.Core
                                assert'TensorFlow.GenOps.Core
                                assignTensorFlow.GenOps.Core
                                assign'TensorFlow.GenOps.Core
                                assignAddTensorFlow.GenOps.Core
                                assignAdd'TensorFlow.GenOps.Core
                                assignAddVariableOpTensorFlow.GenOps.Core
                                assignAddVariableOp'TensorFlow.GenOps.Core
                                assignSubTensorFlow.GenOps.Core
                                assignSub'TensorFlow.GenOps.Core
                                assignSubVariableOpTensorFlow.GenOps.Core
                                assignSubVariableOp'TensorFlow.GenOps.Core
                                assignVariableOpTensorFlow.GenOps.Core
                                assignVariableOp'TensorFlow.GenOps.Core
                                asStringTensorFlow.GenOps.Core
                                asString'TensorFlow.GenOps.Core
                                atanTensorFlow.GenOps.Core
                                atan'TensorFlow.GenOps.Core
                                atan2TensorFlow.GenOps.Core
                                atan2'TensorFlow.GenOps.Core
                                atanhTensorFlow.GenOps.Core
                                atanh'TensorFlow.GenOps.Core
                                audioSpectrogramTensorFlow.GenOps.Core
                                audioSpectrogram'TensorFlow.GenOps.Core
                                audioSummaryTensorFlow.GenOps.Core
                                audioSummary'TensorFlow.GenOps.Core
                                audioSummaryV2TensorFlow.GenOps.Core
                                audioSummaryV2'TensorFlow.GenOps.Core
                                avgPoolTensorFlow.GenOps.Core
                                avgPool'TensorFlow.GenOps.Core
                                avgPool3DTensorFlow.GenOps.Core
                                avgPool3D'TensorFlow.GenOps.Core
                                avgPool3DGradTensorFlow.GenOps.Core
                                avgPool3DGrad'TensorFlow.GenOps.Core
                                avgPoolGradTensorFlow.GenOps.Core
                                avgPoolGrad'TensorFlow.GenOps.Core
                                barrierTensorFlow.GenOps.Core
                                barrier'TensorFlow.GenOps.Core
                                barrierCloseTensorFlow.GenOps.Core
                                barrierClose'TensorFlow.GenOps.Core
                                barrierIncompleteSizeTensorFlow.GenOps.Core
                                barrierIncompleteSize'TensorFlow.GenOps.Core
                                barrierInsertManyTensorFlow.GenOps.Core
                                barrierInsertMany'TensorFlow.GenOps.Core
                                barrierReadySizeTensorFlow.GenOps.Core
                                barrierReadySize'TensorFlow.GenOps.Core
                                barrierTakeManyTensorFlow.GenOps.Core
                                barrierTakeMany'TensorFlow.GenOps.Core
                                batchCholeskyTensorFlow.GenOps.Core
                                batchCholesky'TensorFlow.GenOps.Core
                                batchCholeskyGradTensorFlow.GenOps.Core
                                batchCholeskyGrad'TensorFlow.GenOps.Core
                                batchDatasetTensorFlow.GenOps.Core
                                batchDataset'TensorFlow.GenOps.Core
                                batchFFTTensorFlow.GenOps.Core
                                batchFFT'TensorFlow.GenOps.Core
                                batchFFT2DTensorFlow.GenOps.Core
                                batchFFT2D'TensorFlow.GenOps.Core
                                batchFFT3DTensorFlow.GenOps.Core
                                batchFFT3D'TensorFlow.GenOps.Core
                                batchIFFTTensorFlow.GenOps.Core
                                batchIFFT'TensorFlow.GenOps.Core
                                batchIFFT2DTensorFlow.GenOps.Core
                                batchIFFT2D'TensorFlow.GenOps.Core
                                batchIFFT3DTensorFlow.GenOps.Core
                                batchIFFT3D'TensorFlow.GenOps.Core
                                batchMatMulTensorFlow.GenOps.Core
                                batchMatMul'TensorFlow.GenOps.Core
                                batchMatrixBandPartTensorFlow.GenOps.Core
                                batchMatrixBandPart'TensorFlow.GenOps.Core
                                batchMatrixDeterminantTensorFlow.GenOps.Core
                                batchMatrixDeterminant'TensorFlow.GenOps.Core
                                batchMatrixDiagTensorFlow.GenOps.Core
                                batchMatrixDiag'TensorFlow.GenOps.Core
                                batchMatrixDiagPartTensorFlow.GenOps.Core
                                batchMatrixDiagPart'TensorFlow.GenOps.Core
                                batchMatrixInverseTensorFlow.GenOps.Core
                                batchMatrixInverse'TensorFlow.GenOps.Core
                                batchMatrixSetDiagTensorFlow.GenOps.Core
                                batchMatrixSetDiag'TensorFlow.GenOps.Core
                                batchMatrixSolveTensorFlow.GenOps.Core
                                batchMatrixSolve'TensorFlow.GenOps.Core
                                batchMatrixSolveLsTensorFlow.GenOps.Core
                                batchMatrixSolveLs'TensorFlow.GenOps.Core
                                batchMatrixTriangularSolveTensorFlow.GenOps.Core
                                batchMatrixTriangularSolve'TensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
                                batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
                                batchSelfAdjointEigTensorFlow.GenOps.Core
                                batchSelfAdjointEig'TensorFlow.GenOps.Core
                                batchSelfAdjointEigV2TensorFlow.GenOps.Core
                                batchSelfAdjointEigV2'TensorFlow.GenOps.Core
                                batchSvdTensorFlow.GenOps.Core
                                batchSvd'TensorFlow.GenOps.Core
                                batchToSpaceTensorFlow.GenOps.Core
                                batchToSpace'TensorFlow.GenOps.Core
                                batchToSpaceNDTensorFlow.GenOps.Core
                                batchToSpaceND'TensorFlow.GenOps.Core
                                betaincTensorFlow.GenOps.Core
                                betainc'TensorFlow.GenOps.Core
                                biasAddTensorFlow.GenOps.Core
                                biasAdd'TensorFlow.GenOps.Core
                                biasAddGradTensorFlow.GenOps.Core
                                biasAddGrad'TensorFlow.GenOps.Core
                                biasAddV1TensorFlow.GenOps.Core
                                biasAddV1'TensorFlow.GenOps.Core
                                bincountTensorFlow.GenOps.Core
                                bincount'TensorFlow.GenOps.Core
                                bitcastTensorFlow.GenOps.Core
                                bitcast'TensorFlow.GenOps.Core
                                bitwiseAndTensorFlow.GenOps.Core
                                bitwiseAnd'TensorFlow.GenOps.Core
                                bitwiseOrTensorFlow.GenOps.Core
                                bitwiseOr'TensorFlow.GenOps.Core
                                bitwiseXorTensorFlow.GenOps.Core
                                bitwiseXor'TensorFlow.GenOps.Core
                                broadcastArgsTensorFlow.GenOps.Core
                                broadcastArgs'TensorFlow.GenOps.Core
                                broadcastGradientArgsTensorFlow.GenOps.Core
                                broadcastGradientArgs'TensorFlow.GenOps.Core
                                bucketizeTensorFlow.GenOps.Core
                                bucketize'TensorFlow.GenOps.Core
                                cacheDatasetTensorFlow.GenOps.Core
                                cacheDataset'TensorFlow.GenOps.Core
                                castTensorFlow.GenOps.Core
                                cast'TensorFlow.GenOps.Core
                                ceilTensorFlow.GenOps.Core
                                ceil'TensorFlow.GenOps.Core
                                checkNumericsTensorFlow.GenOps.Core
                                checkNumerics'TensorFlow.GenOps.Core
                                choleskyTensorFlow.GenOps.Core
                                cholesky'TensorFlow.GenOps.Core
                                choleskyGradTensorFlow.GenOps.Core
                                choleskyGrad'TensorFlow.GenOps.Core
                                complexTensorFlow.GenOps.Core
                                complex'TensorFlow.GenOps.Core
                                complexAbsTensorFlow.GenOps.Core
                                complexAbs'TensorFlow.GenOps.Core
                                computeAccidentalHitsTensorFlow.GenOps.Core
                                computeAccidentalHits'TensorFlow.GenOps.Core
                                concatTensorFlow.GenOps.Core
                                concat'TensorFlow.GenOps.Core
                                concatenateDatasetTensorFlow.GenOps.Core
                                concatenateDataset'TensorFlow.GenOps.Core
                                concatOffsetTensorFlow.GenOps.Core
                                concatOffset'TensorFlow.GenOps.Core
                                concatV2TensorFlow.GenOps.Core
                                concatV2'TensorFlow.GenOps.Core
                                conditionalAccumulatorTensorFlow.GenOps.Core
                                conditionalAccumulator'TensorFlow.GenOps.Core
                                conjTensorFlow.GenOps.Core
                                conj'TensorFlow.GenOps.Core
                                constTensorFlow.GenOps.Core
                                const'TensorFlow.GenOps.Core
                                controlTriggerTensorFlow.GenOps.Core
                                controlTrigger'TensorFlow.GenOps.Core
                                conv2DTensorFlow.GenOps.Core
                                conv2D'TensorFlow.GenOps.Core
                                conv2DBackpropFilterTensorFlow.GenOps.Core
                                conv2DBackpropFilter'TensorFlow.GenOps.Core
                                conv2DBackpropInputTensorFlow.GenOps.Core
                                conv2DBackpropInput'TensorFlow.GenOps.Core
                                conv3DTensorFlow.GenOps.Core
                                conv3D'TensorFlow.GenOps.Core
                                conv3DBackpropFilterTensorFlow.GenOps.Core
                                conv3DBackpropFilter'TensorFlow.GenOps.Core
                                conv3DBackpropFilterV2TensorFlow.GenOps.Core
                                conv3DBackpropFilterV2'TensorFlow.GenOps.Core
                                conv3DBackpropInputTensorFlow.GenOps.Core
                                conv3DBackpropInput'TensorFlow.GenOps.Core
                                conv3DBackpropInputV2TensorFlow.GenOps.Core
                                conv3DBackpropInputV2'TensorFlow.GenOps.Core
                                cosTensorFlow.GenOps.Core
                                cos'TensorFlow.GenOps.Core
                                coshTensorFlow.GenOps.Core
                                cosh'TensorFlow.GenOps.Core
                                countUpToTensorFlow.GenOps.Core
                                countUpTo'TensorFlow.GenOps.Core
                                cropAndResizeTensorFlow.GenOps.Core
                                cropAndResize'TensorFlow.GenOps.Core
                                cropAndResizeGradBoxesTensorFlow.GenOps.Core
                                cropAndResizeGradBoxes'TensorFlow.GenOps.Core
                                cropAndResizeGradImageTensorFlow.GenOps.Core
                                cropAndResizeGradImage'TensorFlow.GenOps.Core
                                crossTensorFlow.GenOps.Core
                                cross'TensorFlow.GenOps.Core
                                cTCBeamSearchDecoderTensorFlow.GenOps.Core
                                cTCBeamSearchDecoder'TensorFlow.GenOps.Core
                                cTCGreedyDecoderTensorFlow.GenOps.Core
                                cTCGreedyDecoder'TensorFlow.GenOps.Core
                                cTCLossTensorFlow.GenOps.Core
                                cTCLoss'TensorFlow.GenOps.Core
                                cumprodTensorFlow.GenOps.Core
                                cumprod'TensorFlow.GenOps.Core
                                cumsumTensorFlow.GenOps.Core
                                cumsum'TensorFlow.GenOps.Core
                                debugGradientIdentityTensorFlow.GenOps.Core
                                debugGradientIdentity'TensorFlow.GenOps.Core
                                decodeBase64TensorFlow.GenOps.Core
                                decodeBase64'TensorFlow.GenOps.Core
                                decodeBmpTensorFlow.GenOps.Core
                                decodeBmp'TensorFlow.GenOps.Core
                                decodeCSVTensorFlow.GenOps.Core
                                decodeCSV'TensorFlow.GenOps.Core
                                decodeGifTensorFlow.GenOps.Core
                                decodeGif'TensorFlow.GenOps.Core
                                decodeJpegTensorFlow.GenOps.Core
                                decodeJpeg'TensorFlow.GenOps.Core
                                decodeJSONExampleTensorFlow.GenOps.Core
                                decodeJSONExample'TensorFlow.GenOps.Core
                                decodePngTensorFlow.GenOps.Core
                                decodePng'TensorFlow.GenOps.Core
                                decodeRawTensorFlow.GenOps.Core
                                decodeRaw'TensorFlow.GenOps.Core
                                decodeWavTensorFlow.GenOps.Core
                                decodeWav'TensorFlow.GenOps.Core
                                deleteSessionTensorTensorFlow.GenOps.Core
                                deleteSessionTensor'TensorFlow.GenOps.Core
                                denseToDenseSetOperationTensorFlow.GenOps.Core
                                denseToDenseSetOperation'TensorFlow.GenOps.Core
                                denseToSparseBatchDatasetTensorFlow.GenOps.Core
                                denseToSparseBatchDataset'TensorFlow.GenOps.Core
                                denseToSparseSetOperationTensorFlow.GenOps.Core
                                denseToSparseSetOperation'TensorFlow.GenOps.Core
                                depthToSpaceTensorFlow.GenOps.Core
                                depthToSpace'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeTensorFlow.GenOps.Core
                                depthwiseConv2dNative'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
                                dequantizeTensorFlow.GenOps.Core
                                dequantize'TensorFlow.GenOps.Core
                                deserializeManySparseTensorFlow.GenOps.Core
                                deserializeManySparse'TensorFlow.GenOps.Core
                                destroyResourceOpTensorFlow.GenOps.Core
                                destroyResourceOp'TensorFlow.GenOps.Core
                                destroyTemporaryVariableTensorFlow.GenOps.Core
                                destroyTemporaryVariable'TensorFlow.GenOps.Core
                                diagTensorFlow.GenOps.Core
                                diag'TensorFlow.GenOps.Core
                                diagPartTensorFlow.GenOps.Core
                                diagPart'TensorFlow.GenOps.Core
                                digammaTensorFlow.GenOps.Core
                                digamma'TensorFlow.GenOps.Core
                                dilation2DTensorFlow.GenOps.Core
                                dilation2D'TensorFlow.GenOps.Core
                                dilation2DBackpropFilterTensorFlow.GenOps.Core
                                dilation2DBackpropFilter'TensorFlow.GenOps.Core
                                dilation2DBackpropInputTensorFlow.GenOps.Core
                                dilation2DBackpropInput'TensorFlow.GenOps.Core
                                divTensorFlow.GenOps.Core
                                div'TensorFlow.GenOps.Core
                                drawBoundingBoxesTensorFlow.GenOps.Core
                                drawBoundingBoxes'TensorFlow.GenOps.Core
                                dynamicPartitionTensorFlow.GenOps.Core
                                dynamicPartition'TensorFlow.GenOps.Core
                                dynamicStitchTensorFlow.GenOps.Core
                                dynamicStitch'TensorFlow.GenOps.Core
                                editDistanceTensorFlow.GenOps.Core
                                editDistance'TensorFlow.GenOps.Core
                                eluTensorFlow.GenOps.Core
                                elu'TensorFlow.GenOps.Core
                                eluGradTensorFlow.GenOps.Core
                                eluGrad'TensorFlow.GenOps.Core
                                encodeBase64TensorFlow.GenOps.Core
                                encodeBase64'TensorFlow.GenOps.Core
                                encodeJpegTensorFlow.GenOps.Core
                                encodeJpeg'TensorFlow.GenOps.Core
                                encodePngTensorFlow.GenOps.Core
                                encodePng'TensorFlow.GenOps.Core
                                encodeWavTensorFlow.GenOps.Core
                                encodeWav'TensorFlow.GenOps.Core
                                enterTensorFlow.GenOps.Core
                                enter'TensorFlow.GenOps.Core
                                equalTensorFlow.GenOps.Core
                                equal'TensorFlow.GenOps.Core
                                erfTensorFlow.GenOps.Core
                                erf'TensorFlow.GenOps.Core
                                erfcTensorFlow.GenOps.Core
                                erfc'TensorFlow.GenOps.Core
                                exitTensorFlow.GenOps.Core
                                exit'TensorFlow.GenOps.Core
                                expTensorFlow.GenOps.Core
                                exp'TensorFlow.GenOps.Core
                                expandDimsTensorFlow.GenOps.Core
                                expandDims'TensorFlow.GenOps.Core
                                expm1TensorFlow.GenOps.Core
                                expm1'TensorFlow.GenOps.Core
                                extractGlimpseTensorFlow.GenOps.Core
                                extractGlimpse'TensorFlow.GenOps.Core
                                extractImagePatchesTensorFlow.GenOps.Core
                                extractImagePatches'TensorFlow.GenOps.Core
                                factTensorFlow.GenOps.Core
                                fact'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
                                fakeQueueTensorFlow.GenOps.Core
                                fakeQueue'TensorFlow.GenOps.Core
                                fFTTensorFlow.GenOps.Core
                                fFT'TensorFlow.GenOps.Core
                                fFT2DTensorFlow.GenOps.Core
                                fFT2D'TensorFlow.GenOps.Core
                                fFT3DTensorFlow.GenOps.Core
                                fFT3D'TensorFlow.GenOps.Core
                                fIFOQueueTensorFlow.GenOps.Core
                                fIFOQueue'TensorFlow.GenOps.Core
                                fIFOQueueV2TensorFlow.GenOps.Core
                                fIFOQueueV2'TensorFlow.GenOps.Core
                                fillTensorFlow.GenOps.Core
                                fill'TensorFlow.GenOps.Core
                                fixedLengthRecordDatasetTensorFlow.GenOps.Core
                                fixedLengthRecordDataset'TensorFlow.GenOps.Core
                                fixedLengthRecordReaderTensorFlow.GenOps.Core
                                fixedLengthRecordReader'TensorFlow.GenOps.Core
                                fixedLengthRecordReaderV2TensorFlow.GenOps.Core
                                fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
                                fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
                                fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
                                floorTensorFlow.GenOps.Core
                                floor'TensorFlow.GenOps.Core
                                floorDivTensorFlow.GenOps.Core
                                floorDiv'TensorFlow.GenOps.Core
                                floorModTensorFlow.GenOps.Core
                                floorMod'TensorFlow.GenOps.Core
                                fractionalAvgPoolTensorFlow.GenOps.Core
                                fractionalAvgPool'TensorFlow.GenOps.Core
                                fractionalAvgPoolGradTensorFlow.GenOps.Core
                                fractionalAvgPoolGrad'TensorFlow.GenOps.Core
                                fractionalMaxPoolTensorFlow.GenOps.Core
                                fractionalMaxPool'TensorFlow.GenOps.Core
                                fractionalMaxPoolGradTensorFlow.GenOps.Core
                                fractionalMaxPoolGrad'TensorFlow.GenOps.Core
                                fusedBatchNormTensorFlow.GenOps.Core
                                fusedBatchNorm'TensorFlow.GenOps.Core
                                fusedBatchNormGradTensorFlow.GenOps.Core
                                fusedBatchNormGrad'TensorFlow.GenOps.Core
                                fusedPadConv2DTensorFlow.GenOps.Core
                                fusedPadConv2D'TensorFlow.GenOps.Core
                                fusedResizeAndPadConv2DTensorFlow.GenOps.Core
                                fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
                                gatherTensorFlow.GenOps.Core
                                gather'TensorFlow.GenOps.Core
                                gatherNdTensorFlow.GenOps.Core
                                gatherNd'TensorFlow.GenOps.Core
                                gatherV2TensorFlow.GenOps.Core
                                gatherV2'TensorFlow.GenOps.Core
                                getSessionHandleTensorFlow.GenOps.Core
                                getSessionHandle'TensorFlow.GenOps.Core
                                getSessionHandleV2TensorFlow.GenOps.Core
                                getSessionHandleV2'TensorFlow.GenOps.Core
                                getSessionTensorTensorFlow.GenOps.Core
                                getSessionTensor'TensorFlow.GenOps.Core
                                greaterTensorFlow.GenOps.Core
                                greater'TensorFlow.GenOps.Core
                                greaterEqualTensorFlow.GenOps.Core
                                greaterEqual'TensorFlow.GenOps.Core
                                hashTableTensorFlow.GenOps.Core
                                hashTable'TensorFlow.GenOps.Core
                                hashTableV2TensorFlow.GenOps.Core
                                hashTableV2'TensorFlow.GenOps.Core
                                histogramSummaryTensorFlow.GenOps.Core
                                histogramSummary'TensorFlow.GenOps.Core
                                hSVToRGBTensorFlow.GenOps.Core
                                hSVToRGB'TensorFlow.GenOps.Core
                                identityTensorFlow.GenOps.Core
                                identity'TensorFlow.GenOps.Core
                                identityReaderTensorFlow.GenOps.Core
                                identityReader'TensorFlow.GenOps.Core
                                identityReaderV2TensorFlow.GenOps.Core
                                identityReaderV2'TensorFlow.GenOps.Core
                                iFFTTensorFlow.GenOps.Core
                                iFFT'TensorFlow.GenOps.Core
                                iFFT2DTensorFlow.GenOps.Core
                                iFFT2D'TensorFlow.GenOps.Core
                                iFFT3DTensorFlow.GenOps.Core
                                iFFT3D'TensorFlow.GenOps.Core
                                igammaTensorFlow.GenOps.Core
                                igamma'TensorFlow.GenOps.Core
                                igammacTensorFlow.GenOps.Core
                                igammac'TensorFlow.GenOps.Core
                                ignoreErrorsDatasetTensorFlow.GenOps.Core
                                ignoreErrorsDataset'TensorFlow.GenOps.Core
                                imagTensorFlow.GenOps.Core
                                imag'TensorFlow.GenOps.Core
                                imageSummaryTensorFlow.GenOps.Core
                                imageSummary'TensorFlow.GenOps.Core
                                immutableConstTensorFlow.GenOps.Core
                                immutableConst'TensorFlow.GenOps.Core
                                initializeTableTensorFlow.GenOps.Core
                                initializeTable'TensorFlow.GenOps.Core
                                initializeTableFromTextFileTensorFlow.GenOps.Core
                                initializeTableFromTextFile'TensorFlow.GenOps.Core
                                initializeTableFromTextFileV2TensorFlow.GenOps.Core
                                initializeTableFromTextFileV2'TensorFlow.GenOps.Core
                                initializeTableV2TensorFlow.GenOps.Core
                                initializeTableV2'TensorFlow.GenOps.Core
                                inTopKTensorFlow.GenOps.Core
                                inTopK'TensorFlow.GenOps.Core
                                invTensorFlow.GenOps.Core
                                inv'TensorFlow.GenOps.Core
                                invertTensorFlow.GenOps.Core
                                invert'TensorFlow.GenOps.Core
                                invertPermutationTensorFlow.GenOps.Core
                                invertPermutation'TensorFlow.GenOps.Core
                                invGradTensorFlow.GenOps.Core
                                invGrad'TensorFlow.GenOps.Core
                                iRFFTTensorFlow.GenOps.Core
                                iRFFT'TensorFlow.GenOps.Core
                                iRFFT2DTensorFlow.GenOps.Core
                                iRFFT2D'TensorFlow.GenOps.Core
                                iRFFT3DTensorFlow.GenOps.Core
                                iRFFT3D'TensorFlow.GenOps.Core
                                isFiniteTensorFlow.GenOps.Core
                                isFinite'TensorFlow.GenOps.Core
                                isInfTensorFlow.GenOps.Core
                                isInf'TensorFlow.GenOps.Core
                                isNanTensorFlow.GenOps.Core
                                isNan'TensorFlow.GenOps.Core
                                isVariableInitializedTensorFlow.GenOps.Core
                                isVariableInitialized'TensorFlow.GenOps.Core
                                iteratorTensorFlow.GenOps.Core
                                iterator'TensorFlow.GenOps.Core
                                iteratorDisposeTensorFlow.GenOps.Core
                                iteratorDispose'TensorFlow.GenOps.Core
                                iteratorFromStringHandleTensorFlow.GenOps.Core
                                iteratorFromStringHandle'TensorFlow.GenOps.Core
                                iteratorGetNextTensorFlow.GenOps.Core
                                iteratorGetNext'TensorFlow.GenOps.Core
                                iteratorToStringHandleTensorFlow.GenOps.Core
                                iteratorToStringHandle'TensorFlow.GenOps.Core
                                l2LossTensorFlow.GenOps.Core
                                l2Loss'TensorFlow.GenOps.Core
                                learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
                                learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
                                lessTensorFlow.GenOps.Core
                                less'TensorFlow.GenOps.Core
                                lessEqualTensorFlow.GenOps.Core
                                lessEqual'TensorFlow.GenOps.Core
                                lgammaTensorFlow.GenOps.Core
                                lgamma'TensorFlow.GenOps.Core
                                linSpaceTensorFlow.GenOps.Core
                                linSpace'TensorFlow.GenOps.Core
                                listDiffTensorFlow.GenOps.Core
                                listDiff'TensorFlow.GenOps.Core
                                lMDBReaderTensorFlow.GenOps.Core
                                lMDBReader'TensorFlow.GenOps.Core
                                logTensorFlow.GenOps.Core
                                log'TensorFlow.GenOps.Core
                                log1pTensorFlow.GenOps.Core
                                log1p'TensorFlow.GenOps.Core
                                logicalAndTensorFlow.GenOps.Core
                                logicalAnd'TensorFlow.GenOps.Core
                                logicalNotTensorFlow.GenOps.Core
                                logicalNot'TensorFlow.GenOps.Core
                                logicalOrTensorFlow.GenOps.Core
                                logicalOr'TensorFlow.GenOps.Core
                                logSoftmaxTensorFlow.GenOps.Core
                                logSoftmax'TensorFlow.GenOps.Core
                                logUniformCandidateSamplerTensorFlow.GenOps.Core
                                logUniformCandidateSampler'TensorFlow.GenOps.Core
                                lookupTableExportTensorFlow.GenOps.Core
                                lookupTableExport'TensorFlow.GenOps.Core
                                lookupTableExportV2TensorFlow.GenOps.Core
                                lookupTableExportV2'TensorFlow.GenOps.Core
                                lookupTableFindTensorFlow.GenOps.Core
                                lookupTableFind'TensorFlow.GenOps.Core
                                lookupTableFindV2TensorFlow.GenOps.Core
                                lookupTableFindV2'TensorFlow.GenOps.Core
                                lookupTableImportTensorFlow.GenOps.Core
                                lookupTableImport'TensorFlow.GenOps.Core
                                lookupTableImportV2TensorFlow.GenOps.Core
                                lookupTableImportV2'TensorFlow.GenOps.Core
                                lookupTableInsertTensorFlow.GenOps.Core
                                lookupTableInsert'TensorFlow.GenOps.Core
                                lookupTableInsertV2TensorFlow.GenOps.Core
                                lookupTableInsertV2'TensorFlow.GenOps.Core
                                lookupTableSizeTensorFlow.GenOps.Core
                                lookupTableSize'TensorFlow.GenOps.Core
                                lookupTableSizeV2TensorFlow.GenOps.Core
                                lookupTableSizeV2'TensorFlow.GenOps.Core
                                loopCondTensorFlow.GenOps.Core
                                loopCond'TensorFlow.GenOps.Core
                                lRNTensorFlow.GenOps.Core
                                lRN'TensorFlow.GenOps.Core
                                lRNGradTensorFlow.GenOps.Core
                                lRNGrad'TensorFlow.GenOps.Core
                                makeIteratorTensorFlow.GenOps.Core
                                makeIterator'TensorFlow.GenOps.Core
                                mapClearTensorFlow.GenOps.Core
                                mapClear'TensorFlow.GenOps.Core
                                mapIncompleteSizeTensorFlow.GenOps.Core
                                mapIncompleteSize'TensorFlow.GenOps.Core
                                mapPeekTensorFlow.GenOps.Core
                                mapPeek'TensorFlow.GenOps.Core
                                mapSizeTensorFlow.GenOps.Core
                                mapSize'TensorFlow.GenOps.Core
                                mapStageTensorFlow.GenOps.Core
                                mapStage'TensorFlow.GenOps.Core
                                mapUnstageTensorFlow.GenOps.Core
                                mapUnstage'TensorFlow.GenOps.Core
                                mapUnstageNoKeyTensorFlow.GenOps.Core
                                mapUnstageNoKey'TensorFlow.GenOps.Core
                                matchingFilesTensorFlow.GenOps.Core
                                matchingFiles'TensorFlow.GenOps.Core
                                matMulTensorFlow.GenOps.Core
                                matMul'TensorFlow.GenOps.Core
                                matrixBandPartTensorFlow.GenOps.Core
                                matrixBandPart'TensorFlow.GenOps.Core
                                matrixDeterminantTensorFlow.GenOps.Core
                                matrixDeterminant'TensorFlow.GenOps.Core
                                matrixDiagTensorFlow.GenOps.Core
                                matrixDiag'TensorFlow.GenOps.Core
                                matrixDiagPartTensorFlow.GenOps.Core
                                matrixDiagPart'TensorFlow.GenOps.Core
                                matrixInverseTensorFlow.GenOps.Core
                                matrixInverse'TensorFlow.GenOps.Core
                                matrixSetDiagTensorFlow.GenOps.Core
                                matrixSetDiag'TensorFlow.GenOps.Core
                                matrixSolveTensorFlow.GenOps.Core
                                matrixSolve'TensorFlow.GenOps.Core
                                matrixSolveLsTensorFlow.GenOps.Core
                                matrixSolveLs'TensorFlow.GenOps.Core
                                matrixTriangularSolveTensorFlow.GenOps.Core
                                matrixTriangularSolve'TensorFlow.GenOps.Core
                                maxTensorFlow.GenOps.Core
                                max'TensorFlow.GenOps.Core
                                maximumTensorFlow.GenOps.Core
                                maximum'TensorFlow.GenOps.Core
                                maxPoolTensorFlow.GenOps.Core
                                maxPool'TensorFlow.GenOps.Core
                                maxPool3DTensorFlow.GenOps.Core
                                maxPool3D'TensorFlow.GenOps.Core
                                maxPool3DGradTensorFlow.GenOps.Core
                                maxPool3DGrad'TensorFlow.GenOps.Core
                                maxPool3DGradGradTensorFlow.GenOps.Core
                                maxPool3DGradGrad'TensorFlow.GenOps.Core
                                maxPoolGradTensorFlow.GenOps.Core
                                maxPoolGrad'TensorFlow.GenOps.Core
                                maxPoolGradGradTensorFlow.GenOps.Core
                                maxPoolGradGrad'TensorFlow.GenOps.Core
                                maxPoolGradGradWithArgmaxTensorFlow.GenOps.Core
                                maxPoolGradGradWithArgmax'TensorFlow.GenOps.Core
                                maxPoolGradWithArgmaxTensorFlow.GenOps.Core
                                maxPoolGradWithArgmax'TensorFlow.GenOps.Core
                                maxPoolWithArgmaxTensorFlow.GenOps.Core
                                maxPoolWithArgmax'TensorFlow.GenOps.Core
                                meanTensorFlow.GenOps.Core
                                mean'TensorFlow.GenOps.Core
                                mergeTensorFlow.GenOps.Core
                                merge'TensorFlow.GenOps.Core
                                mergeSummaryTensorFlow.GenOps.Core
                                mergeSummary'TensorFlow.GenOps.Core
                                mergeV2CheckpointsTensorFlow.GenOps.Core
                                mergeV2Checkpoints'TensorFlow.GenOps.Core
                                mfccTensorFlow.GenOps.Core
                                mfcc'TensorFlow.GenOps.Core
                                minTensorFlow.GenOps.Core
                                min'TensorFlow.GenOps.Core
                                minimumTensorFlow.GenOps.Core
                                minimum'TensorFlow.GenOps.Core
                                mirrorPadTensorFlow.GenOps.Core
                                mirrorPad'TensorFlow.GenOps.Core
                                mirrorPadGradTensorFlow.GenOps.Core
                                mirrorPadGrad'TensorFlow.GenOps.Core
                                modTensorFlow.GenOps.Core
                                mod'TensorFlow.GenOps.Core
                                mulTensorFlow.GenOps.Core
                                mul'TensorFlow.GenOps.Core
                                multinomialTensorFlow.GenOps.Core
                                multinomial'TensorFlow.GenOps.Core
                                mutableDenseHashTableTensorFlow.GenOps.Core
                                mutableDenseHashTable'TensorFlow.GenOps.Core
                                mutableDenseHashTableV2TensorFlow.GenOps.Core
                                mutableDenseHashTableV2'TensorFlow.GenOps.Core
                                mutableHashTableTensorFlow.GenOps.Core
                                mutableHashTable'TensorFlow.GenOps.Core
                                mutableHashTableOfTensorsTensorFlow.GenOps.Core
                                mutableHashTableOfTensors'TensorFlow.GenOps.Core
                                mutableHashTableOfTensorsV2TensorFlow.GenOps.Core
                                mutableHashTableOfTensorsV2'TensorFlow.GenOps.Core
                                mutableHashTableV2TensorFlow.GenOps.Core
                                mutableHashTableV2'TensorFlow.GenOps.Core
                                negTensorFlow.GenOps.Core
                                neg'TensorFlow.GenOps.Core
                                negTrainTensorFlow.GenOps.Core
                                negTrain'TensorFlow.GenOps.Core
                                nextIterationTensorFlow.GenOps.Core
                                nextIteration'TensorFlow.GenOps.Core
                                nonMaxSuppressionTensorFlow.GenOps.Core
                                nonMaxSuppression'TensorFlow.GenOps.Core
                                nonMaxSuppressionV2TensorFlow.GenOps.Core
                                nonMaxSuppressionV2'TensorFlow.GenOps.Core
                                noOpTensorFlow.GenOps.Core
                                noOp'TensorFlow.GenOps.Core
                                notEqualTensorFlow.GenOps.Core
                                notEqual'TensorFlow.GenOps.Core
                                oneHotTensorFlow.GenOps.Core
                                oneHot'TensorFlow.GenOps.Core
                                onesLikeTensorFlow.GenOps.Core
                                onesLike'TensorFlow.GenOps.Core
                                orderedMapClearTensorFlow.GenOps.Core
                                orderedMapClear'TensorFlow.GenOps.Core
                                orderedMapIncompleteSizeTensorFlow.GenOps.Core
                                orderedMapIncompleteSize'TensorFlow.GenOps.Core
                                orderedMapPeekTensorFlow.GenOps.Core
                                orderedMapPeek'TensorFlow.GenOps.Core
                                orderedMapSizeTensorFlow.GenOps.Core
                                orderedMapSize'TensorFlow.GenOps.Core
                                orderedMapStageTensorFlow.GenOps.Core
                                orderedMapStage'TensorFlow.GenOps.Core
                                orderedMapUnstageTensorFlow.GenOps.Core
                                orderedMapUnstage'TensorFlow.GenOps.Core
                                orderedMapUnstageNoKeyTensorFlow.GenOps.Core
                                orderedMapUnstageNoKey'TensorFlow.GenOps.Core
                                packTensorFlow.GenOps.Core
                                pack'TensorFlow.GenOps.Core
                                padTensorFlow.GenOps.Core
                                pad'TensorFlow.GenOps.Core
                                paddedBatchDatasetTensorFlow.GenOps.Core
                                paddedBatchDataset'TensorFlow.GenOps.Core
                                paddingFIFOQueueTensorFlow.GenOps.Core
                                paddingFIFOQueue'TensorFlow.GenOps.Core
                                paddingFIFOQueueV2TensorFlow.GenOps.Core
                                paddingFIFOQueueV2'TensorFlow.GenOps.Core
                                padV2TensorFlow.GenOps.Core
                                padV2'TensorFlow.GenOps.Core
                                parallelConcatTensorFlow.GenOps.Core
                                parallelConcat'TensorFlow.GenOps.Core
                                parameterizedTruncatedNormalTensorFlow.GenOps.Core
                                parameterizedTruncatedNormal'TensorFlow.GenOps.Core
                                parseExampleTensorFlow.GenOps.Core
                                parseExample'TensorFlow.GenOps.Core
                                parseSingleSequenceExampleTensorFlow.GenOps.Core
                                parseSingleSequenceExample'TensorFlow.GenOps.Core
                                parseTensorTensorFlow.GenOps.Core
                                parseTensor'TensorFlow.GenOps.Core
                                placeholderTensorFlow.GenOps.Core
                                placeholder'TensorFlow.GenOps.Core
                                placeholderV2TensorFlow.GenOps.Core
                                placeholderV2'TensorFlow.GenOps.Core
                                placeholderWithDefaultTensorFlow.GenOps.Core
                                placeholderWithDefault'TensorFlow.GenOps.Core
                                polygammaTensorFlow.GenOps.Core
                                polygamma'TensorFlow.GenOps.Core
                                powTensorFlow.GenOps.Core
                                pow'TensorFlow.GenOps.Core
                                preventGradientTensorFlow.GenOps.Core
                                preventGradient'TensorFlow.GenOps.Core
                                printTensorFlow.GenOps.Core
                                print'TensorFlow.GenOps.Core
                                priorityQueueTensorFlow.GenOps.Core
                                priorityQueue'TensorFlow.GenOps.Core
                                priorityQueueV2TensorFlow.GenOps.Core
                                priorityQueueV2'TensorFlow.GenOps.Core
                                prodTensorFlow.GenOps.Core
                                prod'TensorFlow.GenOps.Core
                                qrTensorFlow.GenOps.Core
                                qr'TensorFlow.GenOps.Core
                                quantizeAndDequantizeTensorFlow.GenOps.Core
                                quantizeAndDequantize'TensorFlow.GenOps.Core
                                quantizeAndDequantizeV2TensorFlow.GenOps.Core
                                quantizeAndDequantizeV2'TensorFlow.GenOps.Core
                                quantizeAndDequantizeV3TensorFlow.GenOps.Core
                                quantizeAndDequantizeV3'TensorFlow.GenOps.Core
                                quantizedAddTensorFlow.GenOps.Core
                                quantizedAdd'TensorFlow.GenOps.Core
                                quantizedAvgPoolTensorFlow.GenOps.Core
                                quantizedAvgPool'TensorFlow.GenOps.Core
                                quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
                                quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
                                quantizedBiasAddTensorFlow.GenOps.Core
                                quantizedBiasAdd'TensorFlow.GenOps.Core
                                quantizedConcatTensorFlow.GenOps.Core
                                quantizedConcat'TensorFlow.GenOps.Core
                                quantizedConv2DTensorFlow.GenOps.Core
                                quantizedConv2D'TensorFlow.GenOps.Core
                                quantizedInstanceNormTensorFlow.GenOps.Core
                                quantizedInstanceNorm'TensorFlow.GenOps.Core
                                quantizedMatMulTensorFlow.GenOps.Core
                                quantizedMatMul'TensorFlow.GenOps.Core
                                quantizedMaxPoolTensorFlow.GenOps.Core
                                quantizedMaxPool'TensorFlow.GenOps.Core
                                quantizedMulTensorFlow.GenOps.Core
                                quantizedMul'TensorFlow.GenOps.Core
                                quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
                                quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
                                quantizedReluTensorFlow.GenOps.Core
                                quantizedRelu'TensorFlow.GenOps.Core
                                quantizedRelu6TensorFlow.GenOps.Core
                                quantizedRelu6'TensorFlow.GenOps.Core
                                quantizedReluXTensorFlow.GenOps.Core
                                quantizedReluX'TensorFlow.GenOps.Core
                                quantizedReshapeTensorFlow.GenOps.Core
                                quantizedReshape'TensorFlow.GenOps.Core
                                quantizedResizeBilinearTensorFlow.GenOps.Core
                                quantizedResizeBilinear'TensorFlow.GenOps.Core
                                quantizeV2TensorFlow.GenOps.Core
                                quantizeV2'TensorFlow.GenOps.Core
                                queueCloseTensorFlow.GenOps.Core
                                queueClose'TensorFlow.GenOps.Core
                                queueCloseV2TensorFlow.GenOps.Core
                                queueCloseV2'TensorFlow.GenOps.Core
                                queueDequeueTensorFlow.GenOps.Core
                                queueDequeue'TensorFlow.GenOps.Core
                                queueDequeueManyTensorFlow.GenOps.Core
                                queueDequeueMany'TensorFlow.GenOps.Core
                                queueDequeueManyV2TensorFlow.GenOps.Core
                                queueDequeueManyV2'TensorFlow.GenOps.Core
                                queueDequeueUpToTensorFlow.GenOps.Core
                                queueDequeueUpTo'TensorFlow.GenOps.Core
                                queueDequeueUpToV2TensorFlow.GenOps.Core
                                queueDequeueUpToV2'TensorFlow.GenOps.Core
                                queueDequeueV2TensorFlow.GenOps.Core
                                queueDequeueV2'TensorFlow.GenOps.Core
                                queueEnqueueTensorFlow.GenOps.Core
                                queueEnqueue'TensorFlow.GenOps.Core
                                queueEnqueueManyTensorFlow.GenOps.Core
                                queueEnqueueMany'TensorFlow.GenOps.Core
                                queueEnqueueManyV2TensorFlow.GenOps.Core
                                queueEnqueueManyV2'TensorFlow.GenOps.Core
                                queueEnqueueV2TensorFlow.GenOps.Core
                                queueEnqueueV2'TensorFlow.GenOps.Core
                                queueIsClosedTensorFlow.GenOps.Core
                                queueIsClosed'TensorFlow.GenOps.Core
                                queueIsClosedV2TensorFlow.GenOps.Core
                                queueIsClosedV2'TensorFlow.GenOps.Core
                                queueSizeTensorFlow.GenOps.Core
                                queueSize'TensorFlow.GenOps.Core
                                queueSizeV2TensorFlow.GenOps.Core
                                queueSizeV2'TensorFlow.GenOps.Core
                                randomCropTensorFlow.GenOps.Core
                                randomCrop'TensorFlow.GenOps.Core
                                randomGammaTensorFlow.GenOps.Core
                                randomGamma'TensorFlow.GenOps.Core
                                randomPoissonTensorFlow.GenOps.Core
                                randomPoisson'TensorFlow.GenOps.Core
                                randomShuffleTensorFlow.GenOps.Core
                                randomShuffle'TensorFlow.GenOps.Core
                                randomShuffleQueueTensorFlow.GenOps.Core
                                randomShuffleQueue'TensorFlow.GenOps.Core
                                randomShuffleQueueV2TensorFlow.GenOps.Core
                                randomShuffleQueueV2'TensorFlow.GenOps.Core
                                randomStandardNormalTensorFlow.GenOps.Core
                                randomStandardNormal'TensorFlow.GenOps.Core
                                randomUniformTensorFlow.GenOps.Core
                                randomUniform'TensorFlow.GenOps.Core
                                randomUniformIntTensorFlow.GenOps.Core
                                randomUniformInt'TensorFlow.GenOps.Core
                                rangeTensorFlow.GenOps.Core
                                range'TensorFlow.GenOps.Core
                                rangeDatasetTensorFlow.GenOps.Core
                                rangeDataset'TensorFlow.GenOps.Core
                                rankTensorFlow.GenOps.Core
                                rank'TensorFlow.GenOps.Core
                                readerNumRecordsProducedTensorFlow.GenOps.Core
                                readerNumRecordsProduced'TensorFlow.GenOps.Core
                                readerNumRecordsProducedV2TensorFlow.GenOps.Core
                                readerNumRecordsProducedV2'TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
                                readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
                                readerReadTensorFlow.GenOps.Core
                                readerRead'TensorFlow.GenOps.Core
                                readerReadUpToTensorFlow.GenOps.Core
                                readerReadUpTo'TensorFlow.GenOps.Core
                                readerReadUpToV2TensorFlow.GenOps.Core
                                readerReadUpToV2'TensorFlow.GenOps.Core
                                readerReadV2TensorFlow.GenOps.Core
                                readerReadV2'TensorFlow.GenOps.Core
                                readerResetTensorFlow.GenOps.Core
                                readerReset'TensorFlow.GenOps.Core
                                readerResetV2TensorFlow.GenOps.Core
                                readerResetV2'TensorFlow.GenOps.Core
                                readerRestoreStateTensorFlow.GenOps.Core
                                readerRestoreState'TensorFlow.GenOps.Core
                                readerRestoreStateV2TensorFlow.GenOps.Core
                                readerRestoreStateV2'TensorFlow.GenOps.Core
                                readerSerializeStateTensorFlow.GenOps.Core
                                readerSerializeState'TensorFlow.GenOps.Core
                                readerSerializeStateV2TensorFlow.GenOps.Core
                                readerSerializeStateV2'TensorFlow.GenOps.Core
                                readFileTensorFlow.GenOps.Core
                                readFile'TensorFlow.GenOps.Core
                                readVariableOpTensorFlow.GenOps.Core
                                readVariableOp'TensorFlow.GenOps.Core
                                realTensorFlow.GenOps.Core
                                real'TensorFlow.GenOps.Core
                                realDivTensorFlow.GenOps.Core
                                realDiv'TensorFlow.GenOps.Core
                                reciprocalTensorFlow.GenOps.Core
                                reciprocal'TensorFlow.GenOps.Core
                                reciprocalGradTensorFlow.GenOps.Core
                                reciprocalGrad'TensorFlow.GenOps.Core
                                recordInputTensorFlow.GenOps.Core
                                recordInput'TensorFlow.GenOps.Core
                                reduceJoinTensorFlow.GenOps.Core
                                reduceJoin'TensorFlow.GenOps.Core
                                refEnterTensorFlow.GenOps.Core
                                refEnter'TensorFlow.GenOps.Core
                                refExitTensorFlow.GenOps.Core
                                refExit'TensorFlow.GenOps.Core
                                refIdentityTensorFlow.GenOps.Core
                                refIdentity'TensorFlow.GenOps.Core
                                refMergeTensorFlow.GenOps.Core
                                refMerge'TensorFlow.GenOps.Core
                                refNextIterationTensorFlow.GenOps.Core
                                refNextIteration'TensorFlow.GenOps.Core
                                refSelectTensorFlow.GenOps.Core
                                refSelect'TensorFlow.GenOps.Core
                                refSwitchTensorFlow.GenOps.Core
                                refSwitch'TensorFlow.GenOps.Core
                                reluTensorFlow.GenOps.Core
                                relu'TensorFlow.GenOps.Core
                                relu6TensorFlow.GenOps.Core
                                relu6'TensorFlow.GenOps.Core
                                relu6GradTensorFlow.GenOps.Core
                                relu6Grad'TensorFlow.GenOps.Core
                                reluGradTensorFlow.GenOps.Core
                                reluGrad'TensorFlow.GenOps.Core
                                remoteFusedGraphExecuteTensorFlow.GenOps.Core
                                remoteFusedGraphExecute'TensorFlow.GenOps.Core
                                repeatDatasetTensorFlow.GenOps.Core
                                repeatDataset'TensorFlow.GenOps.Core
                                requantizationRangeTensorFlow.GenOps.Core
                                requantizationRange'TensorFlow.GenOps.Core
                                requantizeTensorFlow.GenOps.Core
                                requantize'TensorFlow.GenOps.Core
                                reshapeTensorFlow.GenOps.Core
                                reshape'TensorFlow.GenOps.Core
                                resizeAreaTensorFlow.GenOps.Core
                                resizeArea'TensorFlow.GenOps.Core
                                resizeBicubicTensorFlow.GenOps.Core
                                resizeBicubic'TensorFlow.GenOps.Core
                                resizeBilinearTensorFlow.GenOps.Core
                                resizeBilinear'TensorFlow.GenOps.Core
                                resizeBilinearGradTensorFlow.GenOps.Core
                                resizeBilinearGrad'TensorFlow.GenOps.Core
                                resizeNearestNeighborTensorFlow.GenOps.Core
                                resizeNearestNeighbor'TensorFlow.GenOps.Core
                                resizeNearestNeighborGradTensorFlow.GenOps.Core
                                resizeNearestNeighborGrad'TensorFlow.GenOps.Core
                                resourceApplyAdadeltaTensorFlow.GenOps.Core
                                resourceApplyAdadelta'TensorFlow.GenOps.Core
                                resourceApplyAdagradTensorFlow.GenOps.Core
                                resourceApplyAdagrad'TensorFlow.GenOps.Core
                                resourceApplyAdagradDATensorFlow.GenOps.Core
                                resourceApplyAdagradDA'TensorFlow.GenOps.Core
                                resourceApplyAdamTensorFlow.GenOps.Core
                                resourceApplyAdam'TensorFlow.GenOps.Core
                                resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
                                resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                resourceApplyFtrlTensorFlow.GenOps.Core
                                resourceApplyFtrl'TensorFlow.GenOps.Core
                                resourceApplyFtrlV2TensorFlow.GenOps.Core
                                resourceApplyFtrlV2'TensorFlow.GenOps.Core
                                resourceApplyGradientDescentTensorFlow.GenOps.Core
                                resourceApplyGradientDescent'TensorFlow.GenOps.Core
                                resourceApplyMomentumTensorFlow.GenOps.Core
                                resourceApplyMomentum'TensorFlow.GenOps.Core
                                resourceApplyProximalAdagradTensorFlow.GenOps.Core
                                resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
                                resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
                                resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                resourceApplyRMSPropTensorFlow.GenOps.Core
                                resourceApplyRMSProp'TensorFlow.GenOps.Core
                                resourceGatherTensorFlow.GenOps.Core
                                resourceGather'TensorFlow.GenOps.Core
                                resourceScatterAddTensorFlow.GenOps.Core
                                resourceScatterAdd'TensorFlow.GenOps.Core
                                resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
                                resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
                                resourceSparseApplyAdagradTensorFlow.GenOps.Core
                                resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
                                resourceSparseApplyAdagradDATensorFlow.GenOps.Core
                                resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
                                resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
                                resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                resourceSparseApplyFtrlTensorFlow.GenOps.Core
                                resourceSparseApplyFtrl'TensorFlow.GenOps.Core
                                resourceSparseApplyFtrlV2TensorFlow.GenOps.Core
                                resourceSparseApplyFtrlV2'TensorFlow.GenOps.Core
                                resourceSparseApplyMomentumTensorFlow.GenOps.Core
                                resourceSparseApplyMomentum'TensorFlow.GenOps.Core
                                resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
                                resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
                                resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
                                resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                resourceSparseApplyRMSPropTensorFlow.GenOps.Core
                                resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
                                resourceStridedSliceAssignTensorFlow.GenOps.Core
                                resourceStridedSliceAssign'TensorFlow.GenOps.Core
                                restoreTensorFlow.GenOps.Core
                                restore'TensorFlow.GenOps.Core
                                restoreSliceTensorFlow.GenOps.Core
                                restoreSlice'TensorFlow.GenOps.Core
                                restoreV2TensorFlow.GenOps.Core
                                restoreV2'TensorFlow.GenOps.Core
                                reverseTensorFlow.GenOps.Core
                                reverse'TensorFlow.GenOps.Core
                                reverseSequenceTensorFlow.GenOps.Core
                                reverseSequence'TensorFlow.GenOps.Core
                                reverseV2TensorFlow.GenOps.Core
                                reverseV2'TensorFlow.GenOps.Core
                                rFFTTensorFlow.GenOps.Core
                                rFFT'TensorFlow.GenOps.Core
                                rFFT2DTensorFlow.GenOps.Core
                                rFFT2D'TensorFlow.GenOps.Core
                                rFFT3DTensorFlow.GenOps.Core
                                rFFT3D'TensorFlow.GenOps.Core
                                rGBToHSVTensorFlow.GenOps.Core
                                rGBToHSV'TensorFlow.GenOps.Core
                                rintTensorFlow.GenOps.Core
                                rint'TensorFlow.GenOps.Core
                                roundTensorFlow.GenOps.Core
                                round'TensorFlow.GenOps.Core
                                rsqrtTensorFlow.GenOps.Core
                                rsqrt'TensorFlow.GenOps.Core
                                rsqrtGradTensorFlow.GenOps.Core
                                rsqrtGrad'TensorFlow.GenOps.Core
                                sampleDistortedBoundingBoxTensorFlow.GenOps.Core
                                sampleDistortedBoundingBox'TensorFlow.GenOps.Core
                                sampleDistortedBoundingBoxV2TensorFlow.GenOps.Core
                                sampleDistortedBoundingBoxV2'TensorFlow.GenOps.Core
                                saveTensorFlow.GenOps.Core
                                save'TensorFlow.GenOps.Core
                                saveSlicesTensorFlow.GenOps.Core
                                saveSlices'TensorFlow.GenOps.Core
                                saveV2TensorFlow.GenOps.Core
                                saveV2'TensorFlow.GenOps.Core
                                scalarSummaryTensorFlow.GenOps.Core
                                scalarSummary'TensorFlow.GenOps.Core
                                scatterAddTensorFlow.GenOps.Core
                                scatterAdd'TensorFlow.GenOps.Core
                                scatterDivTensorFlow.GenOps.Core
                                scatterDiv'TensorFlow.GenOps.Core
                                scatterMulTensorFlow.GenOps.Core
                                scatterMul'TensorFlow.GenOps.Core
                                scatterNdTensorFlow.GenOps.Core
                                scatterNd'TensorFlow.GenOps.Core
                                scatterNdAddTensorFlow.GenOps.Core
                                scatterNdAdd'TensorFlow.GenOps.Core
                                scatterNdNonAliasingAddTensorFlow.GenOps.Core
                                scatterNdNonAliasingAdd'TensorFlow.GenOps.Core
                                scatterNdSubTensorFlow.GenOps.Core
                                scatterNdSub'TensorFlow.GenOps.Core
                                scatterNdUpdateTensorFlow.GenOps.Core
                                scatterNdUpdate'TensorFlow.GenOps.Core
                                scatterSubTensorFlow.GenOps.Core
                                scatterSub'TensorFlow.GenOps.Core
                                scatterUpdateTensorFlow.GenOps.Core
                                scatterUpdate'TensorFlow.GenOps.Core
                                sdcaFprintTensorFlow.GenOps.Core
                                sdcaFprint'TensorFlow.GenOps.Core
                                sdcaOptimizerTensorFlow.GenOps.Core
                                sdcaOptimizer'TensorFlow.GenOps.Core
                                sdcaShrinkL1TensorFlow.GenOps.Core
                                sdcaShrinkL1'TensorFlow.GenOps.Core
                                segmentMaxTensorFlow.GenOps.Core
                                segmentMax'TensorFlow.GenOps.Core
                                segmentMeanTensorFlow.GenOps.Core
                                segmentMean'TensorFlow.GenOps.Core
                                segmentMinTensorFlow.GenOps.Core
                                segmentMin'TensorFlow.GenOps.Core
                                segmentProdTensorFlow.GenOps.Core
                                segmentProd'TensorFlow.GenOps.Core
                                segmentSumTensorFlow.GenOps.Core
                                segmentSum'TensorFlow.GenOps.Core
                                selectTensorFlow.GenOps.Core
                                select'TensorFlow.GenOps.Core
                                selfAdjointEigTensorFlow.GenOps.Core
                                selfAdjointEig'TensorFlow.GenOps.Core
                                selfAdjointEigV2TensorFlow.GenOps.Core
                                selfAdjointEigV2'TensorFlow.GenOps.Core
                                serializeManySparseTensorFlow.GenOps.Core
                                serializeManySparse'TensorFlow.GenOps.Core
                                serializeSparseTensorFlow.GenOps.Core
                                serializeSparse'TensorFlow.GenOps.Core
                                setSizeTensorFlow.GenOps.Core
                                setSize'TensorFlow.GenOps.Core
                                shapeTensorFlow.GenOps.Core
                                shape'TensorFlow.GenOps.Core
                                shapeNTensorFlow.GenOps.Core
                                shapeN'TensorFlow.GenOps.Core
                                shardedFilenameTensorFlow.GenOps.Core
                                shardedFilename'TensorFlow.GenOps.Core
                                shardedFilespecTensorFlow.GenOps.Core
                                shardedFilespec'TensorFlow.GenOps.Core
                                shuffleDatasetTensorFlow.GenOps.Core
                                shuffleDataset'TensorFlow.GenOps.Core
                                sigmoidTensorFlow.GenOps.Core
                                sigmoid'TensorFlow.GenOps.Core
                                sigmoidGradTensorFlow.GenOps.Core
                                sigmoidGrad'TensorFlow.GenOps.Core
                                signTensorFlow.GenOps.Core
                                sign'TensorFlow.GenOps.Core
                                sinTensorFlow.GenOps.Core
                                sin'TensorFlow.GenOps.Core
                                sinhTensorFlow.GenOps.Core
                                sinh'TensorFlow.GenOps.Core
                                sizeTensorFlow.GenOps.Core
                                size'TensorFlow.GenOps.Core
                                skipDatasetTensorFlow.GenOps.Core
                                skipDataset'TensorFlow.GenOps.Core
                                skipgramTensorFlow.GenOps.Core
                                skipgram'TensorFlow.GenOps.Core
                                sliceTensorFlow.GenOps.Core
                                slice'TensorFlow.GenOps.Core
                                softmaxTensorFlow.GenOps.Core
                                softmax'TensorFlow.GenOps.Core
                                softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
                                softmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
                                softplusTensorFlow.GenOps.Core
                                softplus'TensorFlow.GenOps.Core
                                softplusGradTensorFlow.GenOps.Core
                                softplusGrad'TensorFlow.GenOps.Core
                                softsignTensorFlow.GenOps.Core
                                softsign'TensorFlow.GenOps.Core
                                softsignGradTensorFlow.GenOps.Core
                                softsignGrad'TensorFlow.GenOps.Core
                                spaceToBatchTensorFlow.GenOps.Core
                                spaceToBatch'TensorFlow.GenOps.Core
                                spaceToBatchNDTensorFlow.GenOps.Core
                                spaceToBatchND'TensorFlow.GenOps.Core
                                spaceToDepthTensorFlow.GenOps.Core
                                spaceToDepth'TensorFlow.GenOps.Core
                                sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
                                sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
                                sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
                                sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
                                sparseAddTensorFlow.GenOps.Core
                                sparseAdd'TensorFlow.GenOps.Core
                                sparseAddGradTensorFlow.GenOps.Core
                                sparseAddGrad'TensorFlow.GenOps.Core
                                sparseApplyAdadeltaTensorFlow.GenOps.Core
                                sparseApplyAdadelta'TensorFlow.GenOps.Core
                                sparseApplyAdagradTensorFlow.GenOps.Core
                                sparseApplyAdagrad'TensorFlow.GenOps.Core
                                sparseApplyAdagradDATensorFlow.GenOps.Core
                                sparseApplyAdagradDA'TensorFlow.GenOps.Core
                                sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
                                sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                sparseApplyFtrlTensorFlow.GenOps.Core
                                sparseApplyFtrl'TensorFlow.GenOps.Core
                                sparseApplyFtrlV2TensorFlow.GenOps.Core
                                sparseApplyFtrlV2'TensorFlow.GenOps.Core
                                sparseApplyMomentumTensorFlow.GenOps.Core
                                sparseApplyMomentum'TensorFlow.GenOps.Core
                                sparseApplyProximalAdagradTensorFlow.GenOps.Core
                                sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
                                sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
                                sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                sparseApplyRMSPropTensorFlow.GenOps.Core
                                sparseApplyRMSProp'TensorFlow.GenOps.Core
                                sparseConcatTensorFlow.GenOps.Core
                                sparseConcat'TensorFlow.GenOps.Core
                                sparseConditionalAccumulatorTensorFlow.GenOps.Core
                                sparseConditionalAccumulator'TensorFlow.GenOps.Core
                                sparseCrossTensorFlow.GenOps.Core
                                sparseCross'TensorFlow.GenOps.Core
                                sparseDenseCwiseAddTensorFlow.GenOps.Core
                                sparseDenseCwiseAdd'TensorFlow.GenOps.Core
                                sparseDenseCwiseDivTensorFlow.GenOps.Core
                                sparseDenseCwiseDiv'TensorFlow.GenOps.Core
                                sparseDenseCwiseMulTensorFlow.GenOps.Core
                                sparseDenseCwiseMul'TensorFlow.GenOps.Core
                                sparseFillEmptyRowsTensorFlow.GenOps.Core
                                sparseFillEmptyRows'TensorFlow.GenOps.Core
                                sparseFillEmptyRowsGradTensorFlow.GenOps.Core
                                sparseFillEmptyRowsGrad'TensorFlow.GenOps.Core
                                sparseMatMulTensorFlow.GenOps.Core
                                sparseMatMul'TensorFlow.GenOps.Core
                                sparseReduceMaxTensorFlow.GenOps.Core
                                sparseReduceMax'TensorFlow.GenOps.Core
                                sparseReduceMaxSparseTensorFlow.GenOps.Core
                                sparseReduceMaxSparse'TensorFlow.GenOps.Core
                                sparseReduceSumTensorFlow.GenOps.Core
                                sparseReduceSum'TensorFlow.GenOps.Core
                                sparseReduceSumSparseTensorFlow.GenOps.Core
                                sparseReduceSumSparse'TensorFlow.GenOps.Core
                                sparseReorderTensorFlow.GenOps.Core
                                sparseReorder'TensorFlow.GenOps.Core
                                sparseReshapeTensorFlow.GenOps.Core
                                sparseReshape'TensorFlow.GenOps.Core
                                sparseSegmentMeanTensorFlow.GenOps.Core
                                sparseSegmentMean'TensorFlow.GenOps.Core
                                sparseSegmentMeanGradTensorFlow.GenOps.Core
                                sparseSegmentMeanGrad'TensorFlow.GenOps.Core
                                sparseSegmentSqrtNTensorFlow.GenOps.Core
                                sparseSegmentSqrtN'TensorFlow.GenOps.Core
                                sparseSegmentSqrtNGradTensorFlow.GenOps.Core
                                sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
                                sparseSegmentSumTensorFlow.GenOps.Core
                                sparseSegmentSum'TensorFlow.GenOps.Core
                                sparseSliceTensorFlow.GenOps.Core
                                sparseSlice'TensorFlow.GenOps.Core
                                sparseSoftmaxTensorFlow.GenOps.Core
                                sparseSoftmax'TensorFlow.GenOps.Core
                                sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
                                sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
                                sparseSparseMaximumTensorFlow.GenOps.Core
                                sparseSparseMaximum'TensorFlow.GenOps.Core
                                sparseSparseMinimumTensorFlow.GenOps.Core
                                sparseSparseMinimum'TensorFlow.GenOps.Core
                                sparseSplitTensorFlow.GenOps.Core
                                sparseSplit'TensorFlow.GenOps.Core
                                sparseTensorDenseAddTensorFlow.GenOps.Core
                                sparseTensorDenseAdd'TensorFlow.GenOps.Core
                                sparseTensorDenseMatMulTensorFlow.GenOps.Core
                                sparseTensorDenseMatMul'TensorFlow.GenOps.Core
                                sparseTensorSliceDatasetTensorFlow.GenOps.Core
                                sparseTensorSliceDataset'TensorFlow.GenOps.Core
                                sparseToDenseTensorFlow.GenOps.Core
                                sparseToDense'TensorFlow.GenOps.Core
                                sparseToSparseSetOperationTensorFlow.GenOps.Core
                                sparseToSparseSetOperation'TensorFlow.GenOps.Core
                                splitTensorFlow.GenOps.Core
                                split'TensorFlow.GenOps.Core
                                splitVTensorFlow.GenOps.Core
                                splitV'TensorFlow.GenOps.Core
                                sqrtTensorFlow.GenOps.Core
                                sqrt'TensorFlow.GenOps.Core
                                sqrtGradTensorFlow.GenOps.Core
                                sqrtGrad'TensorFlow.GenOps.Core
                                squareTensorFlow.GenOps.Core
                                square'TensorFlow.GenOps.Core
                                squaredDifferenceTensorFlow.GenOps.Core
                                squaredDifference'TensorFlow.GenOps.Core
                                squeezeTensorFlow.GenOps.Core
                                squeeze'TensorFlow.GenOps.Core
                                stackTensorFlow.GenOps.Core
                                stack'TensorFlow.GenOps.Core
                                stackCloseTensorFlow.GenOps.Core
                                stackClose'TensorFlow.GenOps.Core
                                stackCloseV2TensorFlow.GenOps.Core
                                stackCloseV2'TensorFlow.GenOps.Core
                                stackPopTensorFlow.GenOps.Core
                                stackPop'TensorFlow.GenOps.Core
                                stackPopV2TensorFlow.GenOps.Core
                                stackPopV2'TensorFlow.GenOps.Core
                                stackPushTensorFlow.GenOps.Core
                                stackPush'TensorFlow.GenOps.Core
                                stackPushV2TensorFlow.GenOps.Core
                                stackPushV2'TensorFlow.GenOps.Core
                                stackV2TensorFlow.GenOps.Core
                                stackV2'TensorFlow.GenOps.Core
                                stageTensorFlow.GenOps.Core
                                stage'TensorFlow.GenOps.Core
                                stageClearTensorFlow.GenOps.Core
                                stageClear'TensorFlow.GenOps.Core
                                stagePeekTensorFlow.GenOps.Core
                                stagePeek'TensorFlow.GenOps.Core
                                stageSizeTensorFlow.GenOps.Core
                                stageSize'TensorFlow.GenOps.Core
                                statelessRandomNormalTensorFlow.GenOps.Core
                                statelessRandomNormal'TensorFlow.GenOps.Core
                                statelessRandomUniformTensorFlow.GenOps.Core
                                statelessRandomUniform'TensorFlow.GenOps.Core
                                statelessTruncatedNormalTensorFlow.GenOps.Core
                                statelessTruncatedNormal'TensorFlow.GenOps.Core
                                stopGradientTensorFlow.GenOps.Core
                                stopGradient'TensorFlow.GenOps.Core
                                stridedSliceTensorFlow.GenOps.Core
                                stridedSlice'TensorFlow.GenOps.Core
                                stridedSliceAssignTensorFlow.GenOps.Core
                                stridedSliceAssign'TensorFlow.GenOps.Core
                                stridedSliceGradTensorFlow.GenOps.Core
                                stridedSliceGrad'TensorFlow.GenOps.Core
                                stringJoinTensorFlow.GenOps.Core
                                stringJoin'TensorFlow.GenOps.Core
                                stringSplitTensorFlow.GenOps.Core
                                stringSplit'TensorFlow.GenOps.Core
                                stringToHashBucketTensorFlow.GenOps.Core
                                stringToHashBucket'TensorFlow.GenOps.Core
                                stringToHashBucketFastTensorFlow.GenOps.Core
                                stringToHashBucketFast'TensorFlow.GenOps.Core
                                stringToHashBucketStrongTensorFlow.GenOps.Core
                                stringToHashBucketStrong'TensorFlow.GenOps.Core
                                stringToNumberTensorFlow.GenOps.Core
                                stringToNumber'TensorFlow.GenOps.Core
                                subTensorFlow.GenOps.Core
                                sub'TensorFlow.GenOps.Core
                                substrTensorFlow.GenOps.Core
                                substr'TensorFlow.GenOps.Core
                                sumTensorFlow.GenOps.Core
                                sum'TensorFlow.GenOps.Core
                                svdTensorFlow.GenOps.Core
                                svd'TensorFlow.GenOps.Core
                                switchTensorFlow.GenOps.Core
                                switch'TensorFlow.GenOps.Core
                                takeDatasetTensorFlow.GenOps.Core
                                takeDataset'TensorFlow.GenOps.Core
                                takeManySparseFromTensorsMapTensorFlow.GenOps.Core
                                takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
                                tanTensorFlow.GenOps.Core
                                tan'TensorFlow.GenOps.Core
                                tanhTensorFlow.GenOps.Core
                                tanh'TensorFlow.GenOps.Core
                                tanhGradTensorFlow.GenOps.Core
                                tanhGrad'TensorFlow.GenOps.Core
                                temporaryVariableTensorFlow.GenOps.Core
                                temporaryVariable'TensorFlow.GenOps.Core
                                tensorArrayTensorFlow.GenOps.Core
                                tensorArray'TensorFlow.GenOps.Core
                                tensorArrayCloseTensorFlow.GenOps.Core
                                tensorArrayClose'TensorFlow.GenOps.Core
                                tensorArrayCloseV2TensorFlow.GenOps.Core
                                tensorArrayCloseV2'TensorFlow.GenOps.Core
                                tensorArrayCloseV3TensorFlow.GenOps.Core
                                tensorArrayCloseV3'TensorFlow.GenOps.Core
                                tensorArrayConcatTensorFlow.GenOps.Core
                                tensorArrayConcat'TensorFlow.GenOps.Core
                                tensorArrayConcatV2TensorFlow.GenOps.Core
                                tensorArrayConcatV2'TensorFlow.GenOps.Core
                                tensorArrayConcatV3TensorFlow.GenOps.Core
                                tensorArrayConcatV3'TensorFlow.GenOps.Core
                                tensorArrayGatherTensorFlow.GenOps.Core
                                tensorArrayGather'TensorFlow.GenOps.Core
                                tensorArrayGatherV2TensorFlow.GenOps.Core
                                tensorArrayGatherV2'TensorFlow.GenOps.Core
                                tensorArrayGatherV3TensorFlow.GenOps.Core
                                tensorArrayGatherV3'TensorFlow.GenOps.Core
                                tensorArrayGradTensorFlow.GenOps.Core
                                tensorArrayGrad'TensorFlow.GenOps.Core
                                tensorArrayGradV2TensorFlow.GenOps.Core
                                tensorArrayGradV2'TensorFlow.GenOps.Core
                                tensorArrayGradV3TensorFlow.GenOps.Core
                                tensorArrayGradV3'TensorFlow.GenOps.Core
                                tensorArrayPackTensorFlow.GenOps.Core
                                tensorArrayPack'TensorFlow.GenOps.Core
                                tensorArrayReadTensorFlow.GenOps.Core
                                tensorArrayRead'TensorFlow.GenOps.Core
                                tensorArrayReadV2TensorFlow.GenOps.Core
                                tensorArrayReadV2'TensorFlow.GenOps.Core
                                tensorArrayReadV3TensorFlow.GenOps.Core
                                tensorArrayReadV3'TensorFlow.GenOps.Core
                                tensorArrayScatterTensorFlow.GenOps.Core
                                tensorArrayScatter'TensorFlow.GenOps.Core
                                tensorArrayScatterV2TensorFlow.GenOps.Core
                                tensorArrayScatterV2'TensorFlow.GenOps.Core
                                tensorArrayScatterV3TensorFlow.GenOps.Core
                                tensorArrayScatterV3'TensorFlow.GenOps.Core
                                tensorArraySizeTensorFlow.GenOps.Core
                                tensorArraySize'TensorFlow.GenOps.Core
                                tensorArraySizeV2TensorFlow.GenOps.Core
                                tensorArraySizeV2'TensorFlow.GenOps.Core
                                tensorArraySizeV3TensorFlow.GenOps.Core
                                tensorArraySizeV3'TensorFlow.GenOps.Core
                                tensorArraySplitTensorFlow.GenOps.Core
                                tensorArraySplit'TensorFlow.GenOps.Core
                                tensorArraySplitV2TensorFlow.GenOps.Core
                                tensorArraySplitV2'TensorFlow.GenOps.Core
                                tensorArraySplitV3TensorFlow.GenOps.Core
                                tensorArraySplitV3'TensorFlow.GenOps.Core
                                tensorArrayUnpackTensorFlow.GenOps.Core
                                tensorArrayUnpack'TensorFlow.GenOps.Core
                                tensorArrayV2TensorFlow.GenOps.Core
                                tensorArrayV2'TensorFlow.GenOps.Core
                                tensorArrayV3TensorFlow.GenOps.Core
                                tensorArrayV3'TensorFlow.GenOps.Core
                                tensorArrayWriteTensorFlow.GenOps.Core
                                tensorArrayWrite'TensorFlow.GenOps.Core
                                tensorArrayWriteV2TensorFlow.GenOps.Core
                                tensorArrayWriteV2'TensorFlow.GenOps.Core
                                tensorArrayWriteV3TensorFlow.GenOps.Core
                                tensorArrayWriteV3'TensorFlow.GenOps.Core
                                tensorDatasetTensorFlow.GenOps.Core
                                tensorDataset'TensorFlow.GenOps.Core
                                tensorSliceDatasetTensorFlow.GenOps.Core
                                tensorSliceDataset'TensorFlow.GenOps.Core
                                tensorSummaryTensorFlow.GenOps.Core
                                tensorSummary'TensorFlow.GenOps.Core
                                tensorSummaryV2TensorFlow.GenOps.Core
                                tensorSummaryV2'TensorFlow.GenOps.Core
                                textLineDatasetTensorFlow.GenOps.Core
                                textLineDataset'TensorFlow.GenOps.Core
                                textLineReaderTensorFlow.GenOps.Core
                                textLineReader'TensorFlow.GenOps.Core
                                textLineReaderV2TensorFlow.GenOps.Core
                                textLineReaderV2'TensorFlow.GenOps.Core
                                tFRecordDatasetTensorFlow.GenOps.Core
                                tFRecordDataset'TensorFlow.GenOps.Core
                                tFRecordReaderTensorFlow.GenOps.Core
                                tFRecordReader'TensorFlow.GenOps.Core
                                tFRecordReaderV2TensorFlow.GenOps.Core
                                tFRecordReaderV2'TensorFlow.GenOps.Core
                                threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
                                threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
                                tileTensorFlow.GenOps.Core
                                tile'TensorFlow.GenOps.Core
                                tileGradTensorFlow.GenOps.Core
                                tileGrad'TensorFlow.GenOps.Core
                                topKTensorFlow.GenOps.Core
                                topK'TensorFlow.GenOps.Core
                                topKV2TensorFlow.GenOps.Core
                                topKV2'TensorFlow.GenOps.Core
                                transposeTensorFlow.GenOps.Core
                                transpose'TensorFlow.GenOps.Core
                                truncateDivTensorFlow.GenOps.Core
                                truncateDiv'TensorFlow.GenOps.Core
                                truncatedNormalTensorFlow.GenOps.Core
                                truncatedNormal'TensorFlow.GenOps.Core
                                truncateModTensorFlow.GenOps.Core
                                truncateMod'TensorFlow.GenOps.Core
                                uniformCandidateSamplerTensorFlow.GenOps.Core
                                uniformCandidateSampler'TensorFlow.GenOps.Core
                                uniqueTensorFlow.GenOps.Core
                                unique'TensorFlow.GenOps.Core
                                uniqueWithCountsTensorFlow.GenOps.Core
                                uniqueWithCounts'TensorFlow.GenOps.Core
                                unpackTensorFlow.GenOps.Core
                                unpack'TensorFlow.GenOps.Core
                                unsortedSegmentMaxTensorFlow.GenOps.Core
                                unsortedSegmentMax'TensorFlow.GenOps.Core
                                unsortedSegmentSumTensorFlow.GenOps.Core
                                unsortedSegmentSum'TensorFlow.GenOps.Core
                                unstageTensorFlow.GenOps.Core
                                unstage'TensorFlow.GenOps.Core
                                varHandleOpTensorFlow.GenOps.Core
                                varHandleOp'TensorFlow.GenOps.Core
                                variableTensorFlow.GenOps.Core
                                variable'TensorFlow.GenOps.Core
                                variableV2TensorFlow.GenOps.Core
                                variableV2'TensorFlow.GenOps.Core
                                varIsInitializedOpTensorFlow.GenOps.Core
                                varIsInitializedOp'TensorFlow.GenOps.Core
                                where'TensorFlow.GenOps.Core
                                where''TensorFlow.GenOps.Core
                                wholeFileReaderTensorFlow.GenOps.Core
                                wholeFileReader'TensorFlow.GenOps.Core
                                wholeFileReaderV2TensorFlow.GenOps.Core
                                wholeFileReaderV2'TensorFlow.GenOps.Core
                                writeFileTensorFlow.GenOps.Core
                                writeFile'TensorFlow.GenOps.Core
                                zerosLikeTensorFlow.GenOps.Core
                                zerosLike'TensorFlow.GenOps.Core
                                zetaTensorFlow.GenOps.Core
                                zeta'TensorFlow.GenOps.Core
                                zipDatasetTensorFlow.GenOps.Core
                                zipDataset'TensorFlow.GenOps.Core
                                _ArgTensorFlow.GenOps.Core
                                _Arg'TensorFlow.GenOps.Core
                                _ArrayToListTensorFlow.GenOps.Core
                                _ArrayToList'TensorFlow.GenOps.Core
                                _HostCastTensorFlow.GenOps.Core
                                _HostCast'TensorFlow.GenOps.Core
                                _HostRecvTensorFlow.GenOps.Core
                                _HostRecv'TensorFlow.GenOps.Core
                                _HostSendTensorFlow.GenOps.Core
                                _HostSend'TensorFlow.GenOps.Core
                                _ListToArrayTensorFlow.GenOps.Core
                                _ListToArray'TensorFlow.GenOps.Core
                                _ParallelConcatStartTensorFlow.GenOps.Core
                                _ParallelConcatStart'TensorFlow.GenOps.Core
                                _ParallelConcatUpdateTensorFlow.GenOps.Core
                                _ParallelConcatUpdate'TensorFlow.GenOps.Core
                                _RecvTensorFlow.GenOps.Core
                                _Recv'TensorFlow.GenOps.Core
                                _RetvalTensorFlow.GenOps.Core
                                _Retval'TensorFlow.GenOps.Core
                                _SendTensorFlow.GenOps.Core
                                _Send'TensorFlow.GenOps.Core
                                _UnsafeReadVariableTensorFlow.GenOps.Core
                                _UnsafeReadVariable'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html index d6a5e1b..3c0e480 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - B)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - B

                                barrierTensorFlow.GenOps.Core
                                barrier'TensorFlow.GenOps.Core
                                barrierCloseTensorFlow.GenOps.Core
                                barrierClose'TensorFlow.GenOps.Core
                                barrierIncompleteSizeTensorFlow.GenOps.Core
                                barrierIncompleteSize'TensorFlow.GenOps.Core
                                barrierInsertManyTensorFlow.GenOps.Core
                                barrierInsertMany'TensorFlow.GenOps.Core
                                barrierReadySizeTensorFlow.GenOps.Core
                                barrierReadySize'TensorFlow.GenOps.Core
                                barrierTakeManyTensorFlow.GenOps.Core
                                barrierTakeMany'TensorFlow.GenOps.Core
                                batchCholeskyTensorFlow.GenOps.Core
                                batchCholesky'TensorFlow.GenOps.Core
                                batchCholeskyGradTensorFlow.GenOps.Core
                                batchCholeskyGrad'TensorFlow.GenOps.Core
                                batchFFTTensorFlow.GenOps.Core
                                batchFFT'TensorFlow.GenOps.Core
                                batchFFT2DTensorFlow.GenOps.Core
                                batchFFT2D'TensorFlow.GenOps.Core
                                batchFFT3DTensorFlow.GenOps.Core
                                batchFFT3D'TensorFlow.GenOps.Core
                                batchIFFTTensorFlow.GenOps.Core
                                batchIFFT'TensorFlow.GenOps.Core
                                batchIFFT2DTensorFlow.GenOps.Core
                                batchIFFT2D'TensorFlow.GenOps.Core
                                batchIFFT3DTensorFlow.GenOps.Core
                                batchIFFT3D'TensorFlow.GenOps.Core
                                batchMatMulTensorFlow.GenOps.Core
                                batchMatMul'TensorFlow.GenOps.Core
                                batchMatrixBandPartTensorFlow.GenOps.Core
                                batchMatrixBandPart'TensorFlow.GenOps.Core
                                batchMatrixDeterminantTensorFlow.GenOps.Core
                                batchMatrixDeterminant'TensorFlow.GenOps.Core
                                batchMatrixDiagTensorFlow.GenOps.Core
                                batchMatrixDiag'TensorFlow.GenOps.Core
                                batchMatrixDiagPartTensorFlow.GenOps.Core
                                batchMatrixDiagPart'TensorFlow.GenOps.Core
                                batchMatrixInverseTensorFlow.GenOps.Core
                                batchMatrixInverse'TensorFlow.GenOps.Core
                                batchMatrixSetDiagTensorFlow.GenOps.Core
                                batchMatrixSetDiag'TensorFlow.GenOps.Core
                                batchMatrixSolveTensorFlow.GenOps.Core
                                batchMatrixSolve'TensorFlow.GenOps.Core
                                batchMatrixSolveLsTensorFlow.GenOps.Core
                                batchMatrixSolveLs'TensorFlow.GenOps.Core
                                batchMatrixTriangularSolveTensorFlow.GenOps.Core
                                batchMatrixTriangularSolve'TensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
                                batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
                                batchSelfAdjointEigTensorFlow.GenOps.Core
                                batchSelfAdjointEig'TensorFlow.GenOps.Core
                                batchSelfAdjointEigV2TensorFlow.GenOps.Core
                                batchSelfAdjointEigV2'TensorFlow.GenOps.Core
                                batchSvdTensorFlow.GenOps.Core
                                batchSvd'TensorFlow.GenOps.Core
                                batchToSpaceTensorFlow.GenOps.Core
                                batchToSpace'TensorFlow.GenOps.Core
                                batchToSpaceNDTensorFlow.GenOps.Core
                                batchToSpaceND'TensorFlow.GenOps.Core
                                betaincTensorFlow.GenOps.Core
                                betainc'TensorFlow.GenOps.Core
                                biasAddTensorFlow.GenOps.Core
                                biasAdd'TensorFlow.GenOps.Core
                                biasAddGradTensorFlow.GenOps.Core
                                biasAddGrad'TensorFlow.GenOps.Core
                                biasAddV1TensorFlow.GenOps.Core
                                biasAddV1'TensorFlow.GenOps.Core
                                bitcastTensorFlow.GenOps.Core
                                bitcast'TensorFlow.GenOps.Core
                                broadcastArgsTensorFlow.GenOps.Core
                                broadcastArgs'TensorFlow.GenOps.Core
                                broadcastGradientArgsTensorFlow.GenOps.Core
                                broadcastGradientArgs'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - B

                                barrierTensorFlow.GenOps.Core
                                barrier'TensorFlow.GenOps.Core
                                barrierCloseTensorFlow.GenOps.Core
                                barrierClose'TensorFlow.GenOps.Core
                                barrierIncompleteSizeTensorFlow.GenOps.Core
                                barrierIncompleteSize'TensorFlow.GenOps.Core
                                barrierInsertManyTensorFlow.GenOps.Core
                                barrierInsertMany'TensorFlow.GenOps.Core
                                barrierReadySizeTensorFlow.GenOps.Core
                                barrierReadySize'TensorFlow.GenOps.Core
                                barrierTakeManyTensorFlow.GenOps.Core
                                barrierTakeMany'TensorFlow.GenOps.Core
                                batchCholeskyTensorFlow.GenOps.Core
                                batchCholesky'TensorFlow.GenOps.Core
                                batchCholeskyGradTensorFlow.GenOps.Core
                                batchCholeskyGrad'TensorFlow.GenOps.Core
                                batchDatasetTensorFlow.GenOps.Core
                                batchDataset'TensorFlow.GenOps.Core
                                batchFFTTensorFlow.GenOps.Core
                                batchFFT'TensorFlow.GenOps.Core
                                batchFFT2DTensorFlow.GenOps.Core
                                batchFFT2D'TensorFlow.GenOps.Core
                                batchFFT3DTensorFlow.GenOps.Core
                                batchFFT3D'TensorFlow.GenOps.Core
                                batchIFFTTensorFlow.GenOps.Core
                                batchIFFT'TensorFlow.GenOps.Core
                                batchIFFT2DTensorFlow.GenOps.Core
                                batchIFFT2D'TensorFlow.GenOps.Core
                                batchIFFT3DTensorFlow.GenOps.Core
                                batchIFFT3D'TensorFlow.GenOps.Core
                                batchMatMulTensorFlow.GenOps.Core
                                batchMatMul'TensorFlow.GenOps.Core
                                batchMatrixBandPartTensorFlow.GenOps.Core
                                batchMatrixBandPart'TensorFlow.GenOps.Core
                                batchMatrixDeterminantTensorFlow.GenOps.Core
                                batchMatrixDeterminant'TensorFlow.GenOps.Core
                                batchMatrixDiagTensorFlow.GenOps.Core
                                batchMatrixDiag'TensorFlow.GenOps.Core
                                batchMatrixDiagPartTensorFlow.GenOps.Core
                                batchMatrixDiagPart'TensorFlow.GenOps.Core
                                batchMatrixInverseTensorFlow.GenOps.Core
                                batchMatrixInverse'TensorFlow.GenOps.Core
                                batchMatrixSetDiagTensorFlow.GenOps.Core
                                batchMatrixSetDiag'TensorFlow.GenOps.Core
                                batchMatrixSolveTensorFlow.GenOps.Core
                                batchMatrixSolve'TensorFlow.GenOps.Core
                                batchMatrixSolveLsTensorFlow.GenOps.Core
                                batchMatrixSolveLs'TensorFlow.GenOps.Core
                                batchMatrixTriangularSolveTensorFlow.GenOps.Core
                                batchMatrixTriangularSolve'TensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
                                batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
                                batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
                                batchSelfAdjointEigTensorFlow.GenOps.Core
                                batchSelfAdjointEig'TensorFlow.GenOps.Core
                                batchSelfAdjointEigV2TensorFlow.GenOps.Core
                                batchSelfAdjointEigV2'TensorFlow.GenOps.Core
                                batchSvdTensorFlow.GenOps.Core
                                batchSvd'TensorFlow.GenOps.Core
                                batchToSpaceTensorFlow.GenOps.Core
                                batchToSpace'TensorFlow.GenOps.Core
                                batchToSpaceNDTensorFlow.GenOps.Core
                                batchToSpaceND'TensorFlow.GenOps.Core
                                betaincTensorFlow.GenOps.Core
                                betainc'TensorFlow.GenOps.Core
                                biasAddTensorFlow.GenOps.Core
                                biasAdd'TensorFlow.GenOps.Core
                                biasAddGradTensorFlow.GenOps.Core
                                biasAddGrad'TensorFlow.GenOps.Core
                                biasAddV1TensorFlow.GenOps.Core
                                biasAddV1'TensorFlow.GenOps.Core
                                bincountTensorFlow.GenOps.Core
                                bincount'TensorFlow.GenOps.Core
                                bitcastTensorFlow.GenOps.Core
                                bitcast'TensorFlow.GenOps.Core
                                bitwiseAndTensorFlow.GenOps.Core
                                bitwiseAnd'TensorFlow.GenOps.Core
                                bitwiseOrTensorFlow.GenOps.Core
                                bitwiseOr'TensorFlow.GenOps.Core
                                bitwiseXorTensorFlow.GenOps.Core
                                bitwiseXor'TensorFlow.GenOps.Core
                                broadcastArgsTensorFlow.GenOps.Core
                                broadcastArgs'TensorFlow.GenOps.Core
                                broadcastGradientArgsTensorFlow.GenOps.Core
                                broadcastGradientArgs'TensorFlow.GenOps.Core
                                bucketizeTensorFlow.GenOps.Core
                                bucketize'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html index 9b087ae..18843d9 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - C)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - C

                                castTensorFlow.GenOps.Core
                                cast'TensorFlow.GenOps.Core
                                ceilTensorFlow.GenOps.Core
                                ceil'TensorFlow.GenOps.Core
                                checkNumericsTensorFlow.GenOps.Core
                                checkNumerics'TensorFlow.GenOps.Core
                                choleskyTensorFlow.GenOps.Core
                                cholesky'TensorFlow.GenOps.Core
                                choleskyGradTensorFlow.GenOps.Core
                                choleskyGrad'TensorFlow.GenOps.Core
                                complexTensorFlow.GenOps.Core
                                complex'TensorFlow.GenOps.Core
                                complexAbsTensorFlow.GenOps.Core
                                complexAbs'TensorFlow.GenOps.Core
                                computeAccidentalHitsTensorFlow.GenOps.Core
                                computeAccidentalHits'TensorFlow.GenOps.Core
                                concatTensorFlow.GenOps.Core
                                concat'TensorFlow.GenOps.Core
                                concatOffsetTensorFlow.GenOps.Core
                                concatOffset'TensorFlow.GenOps.Core
                                concatV2TensorFlow.GenOps.Core
                                concatV2'TensorFlow.GenOps.Core
                                conditionalAccumulatorTensorFlow.GenOps.Core
                                conditionalAccumulator'TensorFlow.GenOps.Core
                                conjTensorFlow.GenOps.Core
                                conj'TensorFlow.GenOps.Core
                                constTensorFlow.GenOps.Core
                                const'TensorFlow.GenOps.Core
                                controlTriggerTensorFlow.GenOps.Core
                                controlTrigger'TensorFlow.GenOps.Core
                                conv2DTensorFlow.GenOps.Core
                                conv2D'TensorFlow.GenOps.Core
                                conv2DBackpropFilterTensorFlow.GenOps.Core
                                conv2DBackpropFilter'TensorFlow.GenOps.Core
                                conv2DBackpropInputTensorFlow.GenOps.Core
                                conv2DBackpropInput'TensorFlow.GenOps.Core
                                conv3DTensorFlow.GenOps.Core
                                conv3D'TensorFlow.GenOps.Core
                                conv3DBackpropFilterTensorFlow.GenOps.Core
                                conv3DBackpropFilter'TensorFlow.GenOps.Core
                                conv3DBackpropFilterV2TensorFlow.GenOps.Core
                                conv3DBackpropFilterV2'TensorFlow.GenOps.Core
                                conv3DBackpropInputTensorFlow.GenOps.Core
                                conv3DBackpropInput'TensorFlow.GenOps.Core
                                conv3DBackpropInputV2TensorFlow.GenOps.Core
                                conv3DBackpropInputV2'TensorFlow.GenOps.Core
                                copyTensorFlow.GenOps.Core
                                copy'TensorFlow.GenOps.Core
                                copyHostTensorFlow.GenOps.Core
                                copyHost'TensorFlow.GenOps.Core
                                cosTensorFlow.GenOps.Core
                                cos'TensorFlow.GenOps.Core
                                countUpToTensorFlow.GenOps.Core
                                countUpTo'TensorFlow.GenOps.Core
                                cropAndResizeTensorFlow.GenOps.Core
                                cropAndResize'TensorFlow.GenOps.Core
                                cropAndResizeGradBoxesTensorFlow.GenOps.Core
                                cropAndResizeGradBoxes'TensorFlow.GenOps.Core
                                cropAndResizeGradImageTensorFlow.GenOps.Core
                                cropAndResizeGradImage'TensorFlow.GenOps.Core
                                crossTensorFlow.GenOps.Core
                                cross'TensorFlow.GenOps.Core
                                cTCBeamSearchDecoderTensorFlow.GenOps.Core
                                cTCBeamSearchDecoder'TensorFlow.GenOps.Core
                                cTCGreedyDecoderTensorFlow.GenOps.Core
                                cTCGreedyDecoder'TensorFlow.GenOps.Core
                                cTCLossTensorFlow.GenOps.Core
                                cTCLoss'TensorFlow.GenOps.Core
                                cumprodTensorFlow.GenOps.Core
                                cumprod'TensorFlow.GenOps.Core
                                cumsumTensorFlow.GenOps.Core
                                cumsum'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - C

                                cacheDatasetTensorFlow.GenOps.Core
                                cacheDataset'TensorFlow.GenOps.Core
                                castTensorFlow.GenOps.Core
                                cast'TensorFlow.GenOps.Core
                                ceilTensorFlow.GenOps.Core
                                ceil'TensorFlow.GenOps.Core
                                checkNumericsTensorFlow.GenOps.Core
                                checkNumerics'TensorFlow.GenOps.Core
                                choleskyTensorFlow.GenOps.Core
                                cholesky'TensorFlow.GenOps.Core
                                choleskyGradTensorFlow.GenOps.Core
                                choleskyGrad'TensorFlow.GenOps.Core
                                complexTensorFlow.GenOps.Core
                                complex'TensorFlow.GenOps.Core
                                complexAbsTensorFlow.GenOps.Core
                                complexAbs'TensorFlow.GenOps.Core
                                computeAccidentalHitsTensorFlow.GenOps.Core
                                computeAccidentalHits'TensorFlow.GenOps.Core
                                concatTensorFlow.GenOps.Core
                                concat'TensorFlow.GenOps.Core
                                concatenateDatasetTensorFlow.GenOps.Core
                                concatenateDataset'TensorFlow.GenOps.Core
                                concatOffsetTensorFlow.GenOps.Core
                                concatOffset'TensorFlow.GenOps.Core
                                concatV2TensorFlow.GenOps.Core
                                concatV2'TensorFlow.GenOps.Core
                                conditionalAccumulatorTensorFlow.GenOps.Core
                                conditionalAccumulator'TensorFlow.GenOps.Core
                                conjTensorFlow.GenOps.Core
                                conj'TensorFlow.GenOps.Core
                                constTensorFlow.GenOps.Core
                                const'TensorFlow.GenOps.Core
                                controlTriggerTensorFlow.GenOps.Core
                                controlTrigger'TensorFlow.GenOps.Core
                                conv2DTensorFlow.GenOps.Core
                                conv2D'TensorFlow.GenOps.Core
                                conv2DBackpropFilterTensorFlow.GenOps.Core
                                conv2DBackpropFilter'TensorFlow.GenOps.Core
                                conv2DBackpropInputTensorFlow.GenOps.Core
                                conv2DBackpropInput'TensorFlow.GenOps.Core
                                conv3DTensorFlow.GenOps.Core
                                conv3D'TensorFlow.GenOps.Core
                                conv3DBackpropFilterTensorFlow.GenOps.Core
                                conv3DBackpropFilter'TensorFlow.GenOps.Core
                                conv3DBackpropFilterV2TensorFlow.GenOps.Core
                                conv3DBackpropFilterV2'TensorFlow.GenOps.Core
                                conv3DBackpropInputTensorFlow.GenOps.Core
                                conv3DBackpropInput'TensorFlow.GenOps.Core
                                conv3DBackpropInputV2TensorFlow.GenOps.Core
                                conv3DBackpropInputV2'TensorFlow.GenOps.Core
                                cosTensorFlow.GenOps.Core
                                cos'TensorFlow.GenOps.Core
                                coshTensorFlow.GenOps.Core
                                cosh'TensorFlow.GenOps.Core
                                countUpToTensorFlow.GenOps.Core
                                countUpTo'TensorFlow.GenOps.Core
                                cropAndResizeTensorFlow.GenOps.Core
                                cropAndResize'TensorFlow.GenOps.Core
                                cropAndResizeGradBoxesTensorFlow.GenOps.Core
                                cropAndResizeGradBoxes'TensorFlow.GenOps.Core
                                cropAndResizeGradImageTensorFlow.GenOps.Core
                                cropAndResizeGradImage'TensorFlow.GenOps.Core
                                crossTensorFlow.GenOps.Core
                                cross'TensorFlow.GenOps.Core
                                cTCBeamSearchDecoderTensorFlow.GenOps.Core
                                cTCBeamSearchDecoder'TensorFlow.GenOps.Core
                                cTCGreedyDecoderTensorFlow.GenOps.Core
                                cTCGreedyDecoder'TensorFlow.GenOps.Core
                                cTCLossTensorFlow.GenOps.Core
                                cTCLoss'TensorFlow.GenOps.Core
                                cumprodTensorFlow.GenOps.Core
                                cumprod'TensorFlow.GenOps.Core
                                cumsumTensorFlow.GenOps.Core
                                cumsum'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html index 73a756e..a7d27fb 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - D)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - D

                                debugIdentityTensorFlow.GenOps.Core
                                debugIdentity'TensorFlow.GenOps.Core
                                debugNanCountTensorFlow.GenOps.Core
                                debugNanCount'TensorFlow.GenOps.Core
                                debugNumericSummaryTensorFlow.GenOps.Core
                                debugNumericSummary'TensorFlow.GenOps.Core
                                decodeBase64TensorFlow.GenOps.Core
                                decodeBase64'TensorFlow.GenOps.Core
                                decodeCSVTensorFlow.GenOps.Core
                                decodeCSV'TensorFlow.GenOps.Core
                                decodeGifTensorFlow.GenOps.Core
                                decodeGif'TensorFlow.GenOps.Core
                                decodeJpegTensorFlow.GenOps.Core
                                decodeJpeg'TensorFlow.GenOps.Core
                                decodeJSONExampleTensorFlow.GenOps.Core
                                decodeJSONExample'TensorFlow.GenOps.Core
                                decodePngTensorFlow.GenOps.Core
                                decodePng'TensorFlow.GenOps.Core
                                decodeRawTensorFlow.GenOps.Core
                                decodeRaw'TensorFlow.GenOps.Core
                                deleteSessionTensorTensorFlow.GenOps.Core
                                deleteSessionTensor'TensorFlow.GenOps.Core
                                denseToDenseSetOperationTensorFlow.GenOps.Core
                                denseToDenseSetOperation'TensorFlow.GenOps.Core
                                denseToSparseSetOperationTensorFlow.GenOps.Core
                                denseToSparseSetOperation'TensorFlow.GenOps.Core
                                depthToSpaceTensorFlow.GenOps.Core
                                depthToSpace'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeTensorFlow.GenOps.Core
                                depthwiseConv2dNative'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
                                dequantizeTensorFlow.GenOps.Core
                                dequantize'TensorFlow.GenOps.Core
                                deserializeManySparseTensorFlow.GenOps.Core
                                deserializeManySparse'TensorFlow.GenOps.Core
                                destroyTemporaryVariableTensorFlow.GenOps.Core
                                destroyTemporaryVariable'TensorFlow.GenOps.Core
                                diagTensorFlow.GenOps.Core
                                diag'TensorFlow.GenOps.Core
                                diagPartTensorFlow.GenOps.Core
                                diagPart'TensorFlow.GenOps.Core
                                digammaTensorFlow.GenOps.Core
                                digamma'TensorFlow.GenOps.Core
                                dilation2DTensorFlow.GenOps.Core
                                dilation2D'TensorFlow.GenOps.Core
                                dilation2DBackpropFilterTensorFlow.GenOps.Core
                                dilation2DBackpropFilter'TensorFlow.GenOps.Core
                                dilation2DBackpropInputTensorFlow.GenOps.Core
                                dilation2DBackpropInput'TensorFlow.GenOps.Core
                                divTensorFlow.GenOps.Core
                                div'TensorFlow.GenOps.Core
                                drawBoundingBoxesTensorFlow.GenOps.Core
                                drawBoundingBoxes'TensorFlow.GenOps.Core
                                dynamicPartitionTensorFlow.GenOps.Core
                                dynamicPartition'TensorFlow.GenOps.Core
                                dynamicStitchTensorFlow.GenOps.Core
                                dynamicStitch'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - D

                                debugGradientIdentityTensorFlow.GenOps.Core
                                debugGradientIdentity'TensorFlow.GenOps.Core
                                decodeBase64TensorFlow.GenOps.Core
                                decodeBase64'TensorFlow.GenOps.Core
                                decodeBmpTensorFlow.GenOps.Core
                                decodeBmp'TensorFlow.GenOps.Core
                                decodeCSVTensorFlow.GenOps.Core
                                decodeCSV'TensorFlow.GenOps.Core
                                decodeGifTensorFlow.GenOps.Core
                                decodeGif'TensorFlow.GenOps.Core
                                decodeJpegTensorFlow.GenOps.Core
                                decodeJpeg'TensorFlow.GenOps.Core
                                decodeJSONExampleTensorFlow.GenOps.Core
                                decodeJSONExample'TensorFlow.GenOps.Core
                                decodePngTensorFlow.GenOps.Core
                                decodePng'TensorFlow.GenOps.Core
                                decodeRawTensorFlow.GenOps.Core
                                decodeRaw'TensorFlow.GenOps.Core
                                decodeWavTensorFlow.GenOps.Core
                                decodeWav'TensorFlow.GenOps.Core
                                deleteSessionTensorTensorFlow.GenOps.Core
                                deleteSessionTensor'TensorFlow.GenOps.Core
                                denseToDenseSetOperationTensorFlow.GenOps.Core
                                denseToDenseSetOperation'TensorFlow.GenOps.Core
                                denseToSparseBatchDatasetTensorFlow.GenOps.Core
                                denseToSparseBatchDataset'TensorFlow.GenOps.Core
                                denseToSparseSetOperationTensorFlow.GenOps.Core
                                denseToSparseSetOperation'TensorFlow.GenOps.Core
                                depthToSpaceTensorFlow.GenOps.Core
                                depthToSpace'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeTensorFlow.GenOps.Core
                                depthwiseConv2dNative'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
                                depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
                                dequantizeTensorFlow.GenOps.Core
                                dequantize'TensorFlow.GenOps.Core
                                deserializeManySparseTensorFlow.GenOps.Core
                                deserializeManySparse'TensorFlow.GenOps.Core
                                destroyResourceOpTensorFlow.GenOps.Core
                                destroyResourceOp'TensorFlow.GenOps.Core
                                destroyTemporaryVariableTensorFlow.GenOps.Core
                                destroyTemporaryVariable'TensorFlow.GenOps.Core
                                diagTensorFlow.GenOps.Core
                                diag'TensorFlow.GenOps.Core
                                diagPartTensorFlow.GenOps.Core
                                diagPart'TensorFlow.GenOps.Core
                                digammaTensorFlow.GenOps.Core
                                digamma'TensorFlow.GenOps.Core
                                dilation2DTensorFlow.GenOps.Core
                                dilation2D'TensorFlow.GenOps.Core
                                dilation2DBackpropFilterTensorFlow.GenOps.Core
                                dilation2DBackpropFilter'TensorFlow.GenOps.Core
                                dilation2DBackpropInputTensorFlow.GenOps.Core
                                dilation2DBackpropInput'TensorFlow.GenOps.Core
                                divTensorFlow.GenOps.Core
                                div'TensorFlow.GenOps.Core
                                drawBoundingBoxesTensorFlow.GenOps.Core
                                drawBoundingBoxes'TensorFlow.GenOps.Core
                                dynamicPartitionTensorFlow.GenOps.Core
                                dynamicPartition'TensorFlow.GenOps.Core
                                dynamicStitchTensorFlow.GenOps.Core
                                dynamicStitch'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html index a3f764f..d1181c4 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - E)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html index e3c8490..168149f 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - F)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - F

                                factTensorFlow.GenOps.Core
                                fact'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
                                fakeQueueTensorFlow.GenOps.Core
                                fakeQueue'TensorFlow.GenOps.Core
                                fFTTensorFlow.GenOps.Core
                                fFT'TensorFlow.GenOps.Core
                                fFT2DTensorFlow.GenOps.Core
                                fFT2D'TensorFlow.GenOps.Core
                                fFT3DTensorFlow.GenOps.Core
                                fFT3D'TensorFlow.GenOps.Core
                                fIFOQueueTensorFlow.GenOps.Core
                                fIFOQueue'TensorFlow.GenOps.Core
                                fIFOQueueV2TensorFlow.GenOps.Core
                                fIFOQueueV2'TensorFlow.GenOps.Core
                                fillTensorFlow.GenOps.Core
                                fill'TensorFlow.GenOps.Core
                                fixedLengthRecordReaderTensorFlow.GenOps.Core
                                fixedLengthRecordReader'TensorFlow.GenOps.Core
                                fixedLengthRecordReaderV2TensorFlow.GenOps.Core
                                fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
                                fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
                                fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
                                floorTensorFlow.GenOps.Core
                                floor'TensorFlow.GenOps.Core
                                floorDivTensorFlow.GenOps.Core
                                floorDiv'TensorFlow.GenOps.Core
                                floorModTensorFlow.GenOps.Core
                                floorMod'TensorFlow.GenOps.Core
                                fractionalAvgPoolTensorFlow.GenOps.Core
                                fractionalAvgPool'TensorFlow.GenOps.Core
                                fractionalAvgPoolGradTensorFlow.GenOps.Core
                                fractionalAvgPoolGrad'TensorFlow.GenOps.Core
                                fractionalMaxPoolTensorFlow.GenOps.Core
                                fractionalMaxPool'TensorFlow.GenOps.Core
                                fractionalMaxPoolGradTensorFlow.GenOps.Core
                                fractionalMaxPoolGrad'TensorFlow.GenOps.Core
                                fusedBatchNormTensorFlow.GenOps.Core
                                fusedBatchNorm'TensorFlow.GenOps.Core
                                fusedBatchNormGradTensorFlow.GenOps.Core
                                fusedBatchNormGrad'TensorFlow.GenOps.Core
                                fusedPadConv2DTensorFlow.GenOps.Core
                                fusedPadConv2D'TensorFlow.GenOps.Core
                                fusedResizeAndPadConv2DTensorFlow.GenOps.Core
                                fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - F

                                factTensorFlow.GenOps.Core
                                fact'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
                                fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
                                fakeQueueTensorFlow.GenOps.Core
                                fakeQueue'TensorFlow.GenOps.Core
                                fFTTensorFlow.GenOps.Core
                                fFT'TensorFlow.GenOps.Core
                                fFT2DTensorFlow.GenOps.Core
                                fFT2D'TensorFlow.GenOps.Core
                                fFT3DTensorFlow.GenOps.Core
                                fFT3D'TensorFlow.GenOps.Core
                                fIFOQueueTensorFlow.GenOps.Core
                                fIFOQueue'TensorFlow.GenOps.Core
                                fIFOQueueV2TensorFlow.GenOps.Core
                                fIFOQueueV2'TensorFlow.GenOps.Core
                                fillTensorFlow.GenOps.Core
                                fill'TensorFlow.GenOps.Core
                                fixedLengthRecordDatasetTensorFlow.GenOps.Core
                                fixedLengthRecordDataset'TensorFlow.GenOps.Core
                                fixedLengthRecordReaderTensorFlow.GenOps.Core
                                fixedLengthRecordReader'TensorFlow.GenOps.Core
                                fixedLengthRecordReaderV2TensorFlow.GenOps.Core
                                fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
                                fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
                                fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
                                floorTensorFlow.GenOps.Core
                                floor'TensorFlow.GenOps.Core
                                floorDivTensorFlow.GenOps.Core
                                floorDiv'TensorFlow.GenOps.Core
                                floorModTensorFlow.GenOps.Core
                                floorMod'TensorFlow.GenOps.Core
                                fractionalAvgPoolTensorFlow.GenOps.Core
                                fractionalAvgPool'TensorFlow.GenOps.Core
                                fractionalAvgPoolGradTensorFlow.GenOps.Core
                                fractionalAvgPoolGrad'TensorFlow.GenOps.Core
                                fractionalMaxPoolTensorFlow.GenOps.Core
                                fractionalMaxPool'TensorFlow.GenOps.Core
                                fractionalMaxPoolGradTensorFlow.GenOps.Core
                                fractionalMaxPoolGrad'TensorFlow.GenOps.Core
                                fusedBatchNormTensorFlow.GenOps.Core
                                fusedBatchNorm'TensorFlow.GenOps.Core
                                fusedBatchNormGradTensorFlow.GenOps.Core
                                fusedBatchNormGrad'TensorFlow.GenOps.Core
                                fusedPadConv2DTensorFlow.GenOps.Core
                                fusedPadConv2D'TensorFlow.GenOps.Core
                                fusedResizeAndPadConv2DTensorFlow.GenOps.Core
                                fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html index 5620d20..2192c1c 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - G)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html index f331a82..16b3b87 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - H)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html index 789d9a2..91ea40b 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - I)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - I

                                identityTensorFlow.GenOps.Core
                                identity'TensorFlow.GenOps.Core
                                identityReaderTensorFlow.GenOps.Core
                                identityReader'TensorFlow.GenOps.Core
                                identityReaderV2TensorFlow.GenOps.Core
                                identityReaderV2'TensorFlow.GenOps.Core
                                iFFTTensorFlow.GenOps.Core
                                iFFT'TensorFlow.GenOps.Core
                                iFFT2DTensorFlow.GenOps.Core
                                iFFT2D'TensorFlow.GenOps.Core
                                iFFT3DTensorFlow.GenOps.Core
                                iFFT3D'TensorFlow.GenOps.Core
                                igammaTensorFlow.GenOps.Core
                                igamma'TensorFlow.GenOps.Core
                                igammacTensorFlow.GenOps.Core
                                igammac'TensorFlow.GenOps.Core
                                ignoreErrorsDatasetTensorFlow.GenOps.Core
                                ignoreErrorsDataset'TensorFlow.GenOps.Core
                                imagTensorFlow.GenOps.Core
                                imag'TensorFlow.GenOps.Core
                                imageSummaryTensorFlow.GenOps.Core
                                imageSummary'TensorFlow.GenOps.Core
                                immutableConstTensorFlow.GenOps.Core
                                immutableConst'TensorFlow.GenOps.Core
                                initializeTableTensorFlow.GenOps.Core
                                initializeTable'TensorFlow.GenOps.Core
                                initializeTableFromTextFileTensorFlow.GenOps.Core
                                initializeTableFromTextFile'TensorFlow.GenOps.Core
                                initializeTableFromTextFileV2TensorFlow.GenOps.Core
                                initializeTableFromTextFileV2'TensorFlow.GenOps.Core
                                initializeTableV2TensorFlow.GenOps.Core
                                initializeTableV2'TensorFlow.GenOps.Core
                                inTopKTensorFlow.GenOps.Core
                                inTopK'TensorFlow.GenOps.Core
                                invTensorFlow.GenOps.Core
                                inv'TensorFlow.GenOps.Core
                                invertTensorFlow.GenOps.Core
                                invert'TensorFlow.GenOps.Core
                                invertPermutationTensorFlow.GenOps.Core
                                invertPermutation'TensorFlow.GenOps.Core
                                invGradTensorFlow.GenOps.Core
                                invGrad'TensorFlow.GenOps.Core
                                iRFFTTensorFlow.GenOps.Core
                                iRFFT'TensorFlow.GenOps.Core
                                iRFFT2DTensorFlow.GenOps.Core
                                iRFFT2D'TensorFlow.GenOps.Core
                                iRFFT3DTensorFlow.GenOps.Core
                                iRFFT3D'TensorFlow.GenOps.Core
                                isFiniteTensorFlow.GenOps.Core
                                isFinite'TensorFlow.GenOps.Core
                                isInfTensorFlow.GenOps.Core
                                isInf'TensorFlow.GenOps.Core
                                isNanTensorFlow.GenOps.Core
                                isNan'TensorFlow.GenOps.Core
                                isVariableInitializedTensorFlow.GenOps.Core
                                isVariableInitialized'TensorFlow.GenOps.Core
                                iteratorTensorFlow.GenOps.Core
                                iterator'TensorFlow.GenOps.Core
                                iteratorDisposeTensorFlow.GenOps.Core
                                iteratorDispose'TensorFlow.GenOps.Core
                                iteratorFromStringHandleTensorFlow.GenOps.Core
                                iteratorFromStringHandle'TensorFlow.GenOps.Core
                                iteratorGetNextTensorFlow.GenOps.Core
                                iteratorGetNext'TensorFlow.GenOps.Core
                                iteratorToStringHandleTensorFlow.GenOps.Core
                                iteratorToStringHandle'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html index 5497639..acf65d7 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - L)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - L

                                l2LossTensorFlow.GenOps.Core
                                l2Loss'TensorFlow.GenOps.Core
                                learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
                                learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
                                lessTensorFlow.GenOps.Core
                                less'TensorFlow.GenOps.Core
                                lessEqualTensorFlow.GenOps.Core
                                lessEqual'TensorFlow.GenOps.Core
                                lgammaTensorFlow.GenOps.Core
                                lgamma'TensorFlow.GenOps.Core
                                linSpaceTensorFlow.GenOps.Core
                                linSpace'TensorFlow.GenOps.Core
                                listDiffTensorFlow.GenOps.Core
                                listDiff'TensorFlow.GenOps.Core
                                lMDBReaderTensorFlow.GenOps.Core
                                lMDBReader'TensorFlow.GenOps.Core
                                logTensorFlow.GenOps.Core
                                log'TensorFlow.GenOps.Core
                                log1pTensorFlow.GenOps.Core
                                log1p'TensorFlow.GenOps.Core
                                logicalAndTensorFlow.GenOps.Core
                                logicalAnd'TensorFlow.GenOps.Core
                                logicalNotTensorFlow.GenOps.Core
                                logicalNot'TensorFlow.GenOps.Core
                                logicalOrTensorFlow.GenOps.Core
                                logicalOr'TensorFlow.GenOps.Core
                                logSoftmaxTensorFlow.GenOps.Core
                                logSoftmax'TensorFlow.GenOps.Core
                                logUniformCandidateSamplerTensorFlow.GenOps.Core
                                logUniformCandidateSampler'TensorFlow.GenOps.Core
                                lookupTableExportTensorFlow.GenOps.Core
                                lookupTableExport'TensorFlow.GenOps.Core
                                lookupTableExportV2TensorFlow.GenOps.Core
                                lookupTableExportV2'TensorFlow.GenOps.Core
                                lookupTableFindTensorFlow.GenOps.Core
                                lookupTableFind'TensorFlow.GenOps.Core
                                lookupTableFindV2TensorFlow.GenOps.Core
                                lookupTableFindV2'TensorFlow.GenOps.Core
                                lookupTableImportTensorFlow.GenOps.Core
                                lookupTableImport'TensorFlow.GenOps.Core
                                lookupTableImportV2TensorFlow.GenOps.Core
                                lookupTableImportV2'TensorFlow.GenOps.Core
                                lookupTableInsertTensorFlow.GenOps.Core
                                lookupTableInsert'TensorFlow.GenOps.Core
                                lookupTableInsertV2TensorFlow.GenOps.Core
                                lookupTableInsertV2'TensorFlow.GenOps.Core
                                lookupTableSizeTensorFlow.GenOps.Core
                                lookupTableSize'TensorFlow.GenOps.Core
                                lookupTableSizeV2TensorFlow.GenOps.Core
                                lookupTableSizeV2'TensorFlow.GenOps.Core
                                loopCondTensorFlow.GenOps.Core
                                loopCond'TensorFlow.GenOps.Core
                                lRNTensorFlow.GenOps.Core
                                lRN'TensorFlow.GenOps.Core
                                lRNGradTensorFlow.GenOps.Core
                                lRNGrad'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html index b97b8c6..6f7010f 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - M)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - M

                                matchingFilesTensorFlow.GenOps.Core
                                matchingFiles'TensorFlow.GenOps.Core
                                matMulTensorFlow.GenOps.Core
                                matMul'TensorFlow.GenOps.Core
                                matrixBandPartTensorFlow.GenOps.Core
                                matrixBandPart'TensorFlow.GenOps.Core
                                matrixDeterminantTensorFlow.GenOps.Core
                                matrixDeterminant'TensorFlow.GenOps.Core
                                matrixDiagTensorFlow.GenOps.Core
                                matrixDiag'TensorFlow.GenOps.Core
                                matrixDiagPartTensorFlow.GenOps.Core
                                matrixDiagPart'TensorFlow.GenOps.Core
                                matrixInverseTensorFlow.GenOps.Core
                                matrixInverse'TensorFlow.GenOps.Core
                                matrixSetDiagTensorFlow.GenOps.Core
                                matrixSetDiag'TensorFlow.GenOps.Core
                                matrixSolveTensorFlow.GenOps.Core
                                matrixSolve'TensorFlow.GenOps.Core
                                matrixSolveLsTensorFlow.GenOps.Core
                                matrixSolveLs'TensorFlow.GenOps.Core
                                matrixTriangularSolveTensorFlow.GenOps.Core
                                matrixTriangularSolve'TensorFlow.GenOps.Core
                                maxTensorFlow.GenOps.Core
                                max'TensorFlow.GenOps.Core
                                maximumTensorFlow.GenOps.Core
                                maximum'TensorFlow.GenOps.Core
                                maxPoolTensorFlow.GenOps.Core
                                maxPool'TensorFlow.GenOps.Core
                                maxPool3DTensorFlow.GenOps.Core
                                maxPool3D'TensorFlow.GenOps.Core
                                maxPool3DGradTensorFlow.GenOps.Core
                                maxPool3DGrad'TensorFlow.GenOps.Core
                                maxPoolGradTensorFlow.GenOps.Core
                                maxPoolGrad'TensorFlow.GenOps.Core
                                maxPoolGradWithArgmaxTensorFlow.GenOps.Core
                                maxPoolGradWithArgmax'TensorFlow.GenOps.Core
                                maxPoolWithArgmaxTensorFlow.GenOps.Core
                                maxPoolWithArgmax'TensorFlow.GenOps.Core
                                meanTensorFlow.GenOps.Core
                                mean'TensorFlow.GenOps.Core
                                mergeTensorFlow.GenOps.Core
                                merge'TensorFlow.GenOps.Core
                                mergeSummaryTensorFlow.GenOps.Core
                                mergeSummary'TensorFlow.GenOps.Core
                                mergeV2CheckpointsTensorFlow.GenOps.Core
                                mergeV2Checkpoints'TensorFlow.GenOps.Core
                                minTensorFlow.GenOps.Core
                                min'TensorFlow.GenOps.Core
                                minimumTensorFlow.GenOps.Core
                                minimum'TensorFlow.GenOps.Core
                                mirrorPadTensorFlow.GenOps.Core
                                mirrorPad'TensorFlow.GenOps.Core
                                mirrorPadGradTensorFlow.GenOps.Core
                                mirrorPadGrad'TensorFlow.GenOps.Core
                                modTensorFlow.GenOps.Core
                                mod'TensorFlow.GenOps.Core
                                mulTensorFlow.GenOps.Core
                                mul'TensorFlow.GenOps.Core
                                multinomialTensorFlow.GenOps.Core
                                multinomial'TensorFlow.GenOps.Core
                                mutableDenseHashTableTensorFlow.GenOps.Core
                                mutableDenseHashTable'TensorFlow.GenOps.Core
                                mutableHashTableTensorFlow.GenOps.Core
                                mutableHashTable'TensorFlow.GenOps.Core
                                mutableHashTableOfTensorsTensorFlow.GenOps.Core
                                mutableHashTableOfTensors'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - M

                                makeIteratorTensorFlow.GenOps.Core
                                makeIterator'TensorFlow.GenOps.Core
                                mapClearTensorFlow.GenOps.Core
                                mapClear'TensorFlow.GenOps.Core
                                mapIncompleteSizeTensorFlow.GenOps.Core
                                mapIncompleteSize'TensorFlow.GenOps.Core
                                mapPeekTensorFlow.GenOps.Core
                                mapPeek'TensorFlow.GenOps.Core
                                mapSizeTensorFlow.GenOps.Core
                                mapSize'TensorFlow.GenOps.Core
                                mapStageTensorFlow.GenOps.Core
                                mapStage'TensorFlow.GenOps.Core
                                mapUnstageTensorFlow.GenOps.Core
                                mapUnstage'TensorFlow.GenOps.Core
                                mapUnstageNoKeyTensorFlow.GenOps.Core
                                mapUnstageNoKey'TensorFlow.GenOps.Core
                                matchingFilesTensorFlow.GenOps.Core
                                matchingFiles'TensorFlow.GenOps.Core
                                matMulTensorFlow.GenOps.Core
                                matMul'TensorFlow.GenOps.Core
                                matrixBandPartTensorFlow.GenOps.Core
                                matrixBandPart'TensorFlow.GenOps.Core
                                matrixDeterminantTensorFlow.GenOps.Core
                                matrixDeterminant'TensorFlow.GenOps.Core
                                matrixDiagTensorFlow.GenOps.Core
                                matrixDiag'TensorFlow.GenOps.Core
                                matrixDiagPartTensorFlow.GenOps.Core
                                matrixDiagPart'TensorFlow.GenOps.Core
                                matrixInverseTensorFlow.GenOps.Core
                                matrixInverse'TensorFlow.GenOps.Core
                                matrixSetDiagTensorFlow.GenOps.Core
                                matrixSetDiag'TensorFlow.GenOps.Core
                                matrixSolveTensorFlow.GenOps.Core
                                matrixSolve'TensorFlow.GenOps.Core
                                matrixSolveLsTensorFlow.GenOps.Core
                                matrixSolveLs'TensorFlow.GenOps.Core
                                matrixTriangularSolveTensorFlow.GenOps.Core
                                matrixTriangularSolve'TensorFlow.GenOps.Core
                                maxTensorFlow.GenOps.Core
                                max'TensorFlow.GenOps.Core
                                maximumTensorFlow.GenOps.Core
                                maximum'TensorFlow.GenOps.Core
                                maxPoolTensorFlow.GenOps.Core
                                maxPool'TensorFlow.GenOps.Core
                                maxPool3DTensorFlow.GenOps.Core
                                maxPool3D'TensorFlow.GenOps.Core
                                maxPool3DGradTensorFlow.GenOps.Core
                                maxPool3DGrad'TensorFlow.GenOps.Core
                                maxPool3DGradGradTensorFlow.GenOps.Core
                                maxPool3DGradGrad'TensorFlow.GenOps.Core
                                maxPoolGradTensorFlow.GenOps.Core
                                maxPoolGrad'TensorFlow.GenOps.Core
                                maxPoolGradGradTensorFlow.GenOps.Core
                                maxPoolGradGrad'TensorFlow.GenOps.Core
                                maxPoolGradGradWithArgmaxTensorFlow.GenOps.Core
                                maxPoolGradGradWithArgmax'TensorFlow.GenOps.Core
                                maxPoolGradWithArgmaxTensorFlow.GenOps.Core
                                maxPoolGradWithArgmax'TensorFlow.GenOps.Core
                                maxPoolWithArgmaxTensorFlow.GenOps.Core
                                maxPoolWithArgmax'TensorFlow.GenOps.Core
                                meanTensorFlow.GenOps.Core
                                mean'TensorFlow.GenOps.Core
                                mergeTensorFlow.GenOps.Core
                                merge'TensorFlow.GenOps.Core
                                mergeSummaryTensorFlow.GenOps.Core
                                mergeSummary'TensorFlow.GenOps.Core
                                mergeV2CheckpointsTensorFlow.GenOps.Core
                                mergeV2Checkpoints'TensorFlow.GenOps.Core
                                mfccTensorFlow.GenOps.Core
                                mfcc'TensorFlow.GenOps.Core
                                minTensorFlow.GenOps.Core
                                min'TensorFlow.GenOps.Core
                                minimumTensorFlow.GenOps.Core
                                minimum'TensorFlow.GenOps.Core
                                mirrorPadTensorFlow.GenOps.Core
                                mirrorPad'TensorFlow.GenOps.Core
                                mirrorPadGradTensorFlow.GenOps.Core
                                mirrorPadGrad'TensorFlow.GenOps.Core
                                modTensorFlow.GenOps.Core
                                mod'TensorFlow.GenOps.Core
                                mulTensorFlow.GenOps.Core
                                mul'TensorFlow.GenOps.Core
                                multinomialTensorFlow.GenOps.Core
                                multinomial'TensorFlow.GenOps.Core
                                mutableDenseHashTableTensorFlow.GenOps.Core
                                mutableDenseHashTable'TensorFlow.GenOps.Core
                                mutableDenseHashTableV2TensorFlow.GenOps.Core
                                mutableDenseHashTableV2'TensorFlow.GenOps.Core
                                mutableHashTableTensorFlow.GenOps.Core
                                mutableHashTable'TensorFlow.GenOps.Core
                                mutableHashTableOfTensorsTensorFlow.GenOps.Core
                                mutableHashTableOfTensors'TensorFlow.GenOps.Core
                                mutableHashTableOfTensorsV2TensorFlow.GenOps.Core
                                mutableHashTableOfTensorsV2'TensorFlow.GenOps.Core
                                mutableHashTableV2TensorFlow.GenOps.Core
                                mutableHashTableV2'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html index c5012ff..d6adeb3 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - N)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html index 0ef5caa..0e914de 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - O)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - O

                                oneHotTensorFlow.GenOps.Core
                                oneHot'TensorFlow.GenOps.Core
                                onesLikeTensorFlow.GenOps.Core
                                onesLike'TensorFlow.GenOps.Core
                                orderedMapClearTensorFlow.GenOps.Core
                                orderedMapClear'TensorFlow.GenOps.Core
                                orderedMapIncompleteSizeTensorFlow.GenOps.Core
                                orderedMapIncompleteSize'TensorFlow.GenOps.Core
                                orderedMapPeekTensorFlow.GenOps.Core
                                orderedMapPeek'TensorFlow.GenOps.Core
                                orderedMapSizeTensorFlow.GenOps.Core
                                orderedMapSize'TensorFlow.GenOps.Core
                                orderedMapStageTensorFlow.GenOps.Core
                                orderedMapStage'TensorFlow.GenOps.Core
                                orderedMapUnstageTensorFlow.GenOps.Core
                                orderedMapUnstage'TensorFlow.GenOps.Core
                                orderedMapUnstageNoKeyTensorFlow.GenOps.Core
                                orderedMapUnstageNoKey'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html index 0acc213..5f37409 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - P)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html index 0fa93cf..09e0108 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - Q)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - Q

                                qrTensorFlow.GenOps.Core
                                qr'TensorFlow.GenOps.Core
                                quantizeAndDequantizeTensorFlow.GenOps.Core
                                quantizeAndDequantize'TensorFlow.GenOps.Core
                                quantizedAvgPoolTensorFlow.GenOps.Core
                                quantizedAvgPool'TensorFlow.GenOps.Core
                                quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
                                quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
                                quantizedBiasAddTensorFlow.GenOps.Core
                                quantizedBiasAdd'TensorFlow.GenOps.Core
                                quantizedConcatTensorFlow.GenOps.Core
                                quantizedConcat'TensorFlow.GenOps.Core
                                quantizedConv2DTensorFlow.GenOps.Core
                                quantizedConv2D'TensorFlow.GenOps.Core
                                quantizedInstanceNormTensorFlow.GenOps.Core
                                quantizedInstanceNorm'TensorFlow.GenOps.Core
                                quantizedMatMulTensorFlow.GenOps.Core
                                quantizedMatMul'TensorFlow.GenOps.Core
                                quantizedMaxPoolTensorFlow.GenOps.Core
                                quantizedMaxPool'TensorFlow.GenOps.Core
                                quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
                                quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
                                quantizedReluTensorFlow.GenOps.Core
                                quantizedRelu'TensorFlow.GenOps.Core
                                quantizedRelu6TensorFlow.GenOps.Core
                                quantizedRelu6'TensorFlow.GenOps.Core
                                quantizedReluXTensorFlow.GenOps.Core
                                quantizedReluX'TensorFlow.GenOps.Core
                                quantizedReshapeTensorFlow.GenOps.Core
                                quantizedReshape'TensorFlow.GenOps.Core
                                quantizeV2TensorFlow.GenOps.Core
                                quantizeV2'TensorFlow.GenOps.Core
                                queueCloseTensorFlow.GenOps.Core
                                queueClose'TensorFlow.GenOps.Core
                                queueCloseV2TensorFlow.GenOps.Core
                                queueCloseV2'TensorFlow.GenOps.Core
                                queueDequeueTensorFlow.GenOps.Core
                                queueDequeue'TensorFlow.GenOps.Core
                                queueDequeueManyTensorFlow.GenOps.Core
                                queueDequeueMany'TensorFlow.GenOps.Core
                                queueDequeueManyV2TensorFlow.GenOps.Core
                                queueDequeueManyV2'TensorFlow.GenOps.Core
                                queueDequeueUpToTensorFlow.GenOps.Core
                                queueDequeueUpTo'TensorFlow.GenOps.Core
                                queueDequeueUpToV2TensorFlow.GenOps.Core
                                queueDequeueUpToV2'TensorFlow.GenOps.Core
                                queueDequeueV2TensorFlow.GenOps.Core
                                queueDequeueV2'TensorFlow.GenOps.Core
                                queueEnqueueTensorFlow.GenOps.Core
                                queueEnqueue'TensorFlow.GenOps.Core
                                queueEnqueueManyTensorFlow.GenOps.Core
                                queueEnqueueMany'TensorFlow.GenOps.Core
                                queueEnqueueManyV2TensorFlow.GenOps.Core
                                queueEnqueueManyV2'TensorFlow.GenOps.Core
                                queueEnqueueV2TensorFlow.GenOps.Core
                                queueEnqueueV2'TensorFlow.GenOps.Core
                                queueSizeTensorFlow.GenOps.Core
                                queueSize'TensorFlow.GenOps.Core
                                queueSizeV2TensorFlow.GenOps.Core
                                queueSizeV2'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - Q

                                qrTensorFlow.GenOps.Core
                                qr'TensorFlow.GenOps.Core
                                quantizeAndDequantizeTensorFlow.GenOps.Core
                                quantizeAndDequantize'TensorFlow.GenOps.Core
                                quantizeAndDequantizeV2TensorFlow.GenOps.Core
                                quantizeAndDequantizeV2'TensorFlow.GenOps.Core
                                quantizeAndDequantizeV3TensorFlow.GenOps.Core
                                quantizeAndDequantizeV3'TensorFlow.GenOps.Core
                                quantizedAddTensorFlow.GenOps.Core
                                quantizedAdd'TensorFlow.GenOps.Core
                                quantizedAvgPoolTensorFlow.GenOps.Core
                                quantizedAvgPool'TensorFlow.GenOps.Core
                                quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
                                quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
                                quantizedBiasAddTensorFlow.GenOps.Core
                                quantizedBiasAdd'TensorFlow.GenOps.Core
                                quantizedConcatTensorFlow.GenOps.Core
                                quantizedConcat'TensorFlow.GenOps.Core
                                quantizedConv2DTensorFlow.GenOps.Core
                                quantizedConv2D'TensorFlow.GenOps.Core
                                quantizedInstanceNormTensorFlow.GenOps.Core
                                quantizedInstanceNorm'TensorFlow.GenOps.Core
                                quantizedMatMulTensorFlow.GenOps.Core
                                quantizedMatMul'TensorFlow.GenOps.Core
                                quantizedMaxPoolTensorFlow.GenOps.Core
                                quantizedMaxPool'TensorFlow.GenOps.Core
                                quantizedMulTensorFlow.GenOps.Core
                                quantizedMul'TensorFlow.GenOps.Core
                                quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
                                quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
                                quantizedReluTensorFlow.GenOps.Core
                                quantizedRelu'TensorFlow.GenOps.Core
                                quantizedRelu6TensorFlow.GenOps.Core
                                quantizedRelu6'TensorFlow.GenOps.Core
                                quantizedReluXTensorFlow.GenOps.Core
                                quantizedReluX'TensorFlow.GenOps.Core
                                quantizedReshapeTensorFlow.GenOps.Core
                                quantizedReshape'TensorFlow.GenOps.Core
                                quantizedResizeBilinearTensorFlow.GenOps.Core
                                quantizedResizeBilinear'TensorFlow.GenOps.Core
                                quantizeV2TensorFlow.GenOps.Core
                                quantizeV2'TensorFlow.GenOps.Core
                                queueCloseTensorFlow.GenOps.Core
                                queueClose'TensorFlow.GenOps.Core
                                queueCloseV2TensorFlow.GenOps.Core
                                queueCloseV2'TensorFlow.GenOps.Core
                                queueDequeueTensorFlow.GenOps.Core
                                queueDequeue'TensorFlow.GenOps.Core
                                queueDequeueManyTensorFlow.GenOps.Core
                                queueDequeueMany'TensorFlow.GenOps.Core
                                queueDequeueManyV2TensorFlow.GenOps.Core
                                queueDequeueManyV2'TensorFlow.GenOps.Core
                                queueDequeueUpToTensorFlow.GenOps.Core
                                queueDequeueUpTo'TensorFlow.GenOps.Core
                                queueDequeueUpToV2TensorFlow.GenOps.Core
                                queueDequeueUpToV2'TensorFlow.GenOps.Core
                                queueDequeueV2TensorFlow.GenOps.Core
                                queueDequeueV2'TensorFlow.GenOps.Core
                                queueEnqueueTensorFlow.GenOps.Core
                                queueEnqueue'TensorFlow.GenOps.Core
                                queueEnqueueManyTensorFlow.GenOps.Core
                                queueEnqueueMany'TensorFlow.GenOps.Core
                                queueEnqueueManyV2TensorFlow.GenOps.Core
                                queueEnqueueManyV2'TensorFlow.GenOps.Core
                                queueEnqueueV2TensorFlow.GenOps.Core
                                queueEnqueueV2'TensorFlow.GenOps.Core
                                queueIsClosedTensorFlow.GenOps.Core
                                queueIsClosed'TensorFlow.GenOps.Core
                                queueIsClosedV2TensorFlow.GenOps.Core
                                queueIsClosedV2'TensorFlow.GenOps.Core
                                queueSizeTensorFlow.GenOps.Core
                                queueSize'TensorFlow.GenOps.Core
                                queueSizeV2TensorFlow.GenOps.Core
                                queueSizeV2'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html index b3c90e5..82b13e1 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - R)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - R

                                randomCropTensorFlow.GenOps.Core
                                randomCrop'TensorFlow.GenOps.Core
                                randomGammaTensorFlow.GenOps.Core
                                randomGamma'TensorFlow.GenOps.Core
                                randomShuffleTensorFlow.GenOps.Core
                                randomShuffle'TensorFlow.GenOps.Core
                                randomShuffleQueueTensorFlow.GenOps.Core
                                randomShuffleQueue'TensorFlow.GenOps.Core
                                randomShuffleQueueV2TensorFlow.GenOps.Core
                                randomShuffleQueueV2'TensorFlow.GenOps.Core
                                randomStandardNormalTensorFlow.GenOps.Core
                                randomStandardNormal'TensorFlow.GenOps.Core
                                randomUniformTensorFlow.GenOps.Core
                                randomUniform'TensorFlow.GenOps.Core
                                randomUniformIntTensorFlow.GenOps.Core
                                randomUniformInt'TensorFlow.GenOps.Core
                                rangeTensorFlow.GenOps.Core
                                range'TensorFlow.GenOps.Core
                                rankTensorFlow.GenOps.Core
                                rank'TensorFlow.GenOps.Core
                                readerNumRecordsProducedTensorFlow.GenOps.Core
                                readerNumRecordsProduced'TensorFlow.GenOps.Core
                                readerNumRecordsProducedV2TensorFlow.GenOps.Core
                                readerNumRecordsProducedV2'TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
                                readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
                                readerReadTensorFlow.GenOps.Core
                                readerRead'TensorFlow.GenOps.Core
                                readerReadUpToTensorFlow.GenOps.Core
                                readerReadUpTo'TensorFlow.GenOps.Core
                                readerReadUpToV2TensorFlow.GenOps.Core
                                readerReadUpToV2'TensorFlow.GenOps.Core
                                readerReadV2TensorFlow.GenOps.Core
                                readerReadV2'TensorFlow.GenOps.Core
                                readerResetTensorFlow.GenOps.Core
                                readerReset'TensorFlow.GenOps.Core
                                readerResetV2TensorFlow.GenOps.Core
                                readerResetV2'TensorFlow.GenOps.Core
                                readerRestoreStateTensorFlow.GenOps.Core
                                readerRestoreState'TensorFlow.GenOps.Core
                                readerRestoreStateV2TensorFlow.GenOps.Core
                                readerRestoreStateV2'TensorFlow.GenOps.Core
                                readerSerializeStateTensorFlow.GenOps.Core
                                readerSerializeState'TensorFlow.GenOps.Core
                                readerSerializeStateV2TensorFlow.GenOps.Core
                                readerSerializeStateV2'TensorFlow.GenOps.Core
                                readFileTensorFlow.GenOps.Core
                                readFile'TensorFlow.GenOps.Core
                                readVariableOpTensorFlow.GenOps.Core
                                readVariableOp'TensorFlow.GenOps.Core
                                realTensorFlow.GenOps.Core
                                real'TensorFlow.GenOps.Core
                                realDivTensorFlow.GenOps.Core
                                realDiv'TensorFlow.GenOps.Core
                                reciprocalTensorFlow.GenOps.Core
                                reciprocal'TensorFlow.GenOps.Core
                                reciprocalGradTensorFlow.GenOps.Core
                                reciprocalGrad'TensorFlow.GenOps.Core
                                recordInputTensorFlow.GenOps.Core
                                recordInput'TensorFlow.GenOps.Core
                                reduceJoinTensorFlow.GenOps.Core
                                reduceJoin'TensorFlow.GenOps.Core
                                refEnterTensorFlow.GenOps.Core
                                refEnter'TensorFlow.GenOps.Core
                                refExitTensorFlow.GenOps.Core
                                refExit'TensorFlow.GenOps.Core
                                refIdentityTensorFlow.GenOps.Core
                                refIdentity'TensorFlow.GenOps.Core
                                refMergeTensorFlow.GenOps.Core
                                refMerge'TensorFlow.GenOps.Core
                                refNextIterationTensorFlow.GenOps.Core
                                refNextIteration'TensorFlow.GenOps.Core
                                refSelectTensorFlow.GenOps.Core
                                refSelect'TensorFlow.GenOps.Core
                                refSwitchTensorFlow.GenOps.Core
                                refSwitch'TensorFlow.GenOps.Core
                                reluTensorFlow.GenOps.Core
                                relu'TensorFlow.GenOps.Core
                                relu6TensorFlow.GenOps.Core
                                relu6'TensorFlow.GenOps.Core
                                relu6GradTensorFlow.GenOps.Core
                                relu6Grad'TensorFlow.GenOps.Core
                                reluGradTensorFlow.GenOps.Core
                                reluGrad'TensorFlow.GenOps.Core
                                requantizationRangeTensorFlow.GenOps.Core
                                requantizationRange'TensorFlow.GenOps.Core
                                requantizeTensorFlow.GenOps.Core
                                requantize'TensorFlow.GenOps.Core
                                reshapeTensorFlow.GenOps.Core
                                reshape'TensorFlow.GenOps.Core
                                resizeAreaTensorFlow.GenOps.Core
                                resizeArea'TensorFlow.GenOps.Core
                                resizeBicubicTensorFlow.GenOps.Core
                                resizeBicubic'TensorFlow.GenOps.Core
                                resizeBilinearTensorFlow.GenOps.Core
                                resizeBilinear'TensorFlow.GenOps.Core
                                resizeBilinearGradTensorFlow.GenOps.Core
                                resizeBilinearGrad'TensorFlow.GenOps.Core
                                resizeNearestNeighborTensorFlow.GenOps.Core
                                resizeNearestNeighbor'TensorFlow.GenOps.Core
                                resizeNearestNeighborGradTensorFlow.GenOps.Core
                                resizeNearestNeighborGrad'TensorFlow.GenOps.Core
                                resourceApplyAdadeltaTensorFlow.GenOps.Core
                                resourceApplyAdadelta'TensorFlow.GenOps.Core
                                resourceApplyAdagradTensorFlow.GenOps.Core
                                resourceApplyAdagrad'TensorFlow.GenOps.Core
                                resourceApplyAdagradDATensorFlow.GenOps.Core
                                resourceApplyAdagradDA'TensorFlow.GenOps.Core
                                resourceApplyAdamTensorFlow.GenOps.Core
                                resourceApplyAdam'TensorFlow.GenOps.Core
                                resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
                                resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                resourceApplyFtrlTensorFlow.GenOps.Core
                                resourceApplyFtrl'TensorFlow.GenOps.Core
                                resourceApplyGradientDescentTensorFlow.GenOps.Core
                                resourceApplyGradientDescent'TensorFlow.GenOps.Core
                                resourceApplyMomentumTensorFlow.GenOps.Core
                                resourceApplyMomentum'TensorFlow.GenOps.Core
                                resourceApplyProximalAdagradTensorFlow.GenOps.Core
                                resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
                                resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
                                resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                resourceApplyRMSPropTensorFlow.GenOps.Core
                                resourceApplyRMSProp'TensorFlow.GenOps.Core
                                resourceGatherTensorFlow.GenOps.Core
                                resourceGather'TensorFlow.GenOps.Core
                                resourceScatterAddTensorFlow.GenOps.Core
                                resourceScatterAdd'TensorFlow.GenOps.Core
                                resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
                                resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
                                resourceSparseApplyAdagradTensorFlow.GenOps.Core
                                resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
                                resourceSparseApplyAdagradDATensorFlow.GenOps.Core
                                resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
                                resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
                                resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                resourceSparseApplyFtrlTensorFlow.GenOps.Core
                                resourceSparseApplyFtrl'TensorFlow.GenOps.Core
                                resourceSparseApplyMomentumTensorFlow.GenOps.Core
                                resourceSparseApplyMomentum'TensorFlow.GenOps.Core
                                resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
                                resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
                                resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
                                resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                resourceSparseApplyRMSPropTensorFlow.GenOps.Core
                                resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
                                restoreTensorFlow.GenOps.Core
                                restore'TensorFlow.GenOps.Core
                                restoreSliceTensorFlow.GenOps.Core
                                restoreSlice'TensorFlow.GenOps.Core
                                restoreV2TensorFlow.GenOps.Core
                                restoreV2'TensorFlow.GenOps.Core
                                reverseTensorFlow.GenOps.Core
                                reverse'TensorFlow.GenOps.Core
                                reverseSequenceTensorFlow.GenOps.Core
                                reverseSequence'TensorFlow.GenOps.Core
                                reverseV2TensorFlow.GenOps.Core
                                reverseV2'TensorFlow.GenOps.Core
                                rGBToHSVTensorFlow.GenOps.Core
                                rGBToHSV'TensorFlow.GenOps.Core
                                rintTensorFlow.GenOps.Core
                                rint'TensorFlow.GenOps.Core
                                roundTensorFlow.GenOps.Core
                                round'TensorFlow.GenOps.Core
                                rsqrtTensorFlow.GenOps.Core
                                rsqrt'TensorFlow.GenOps.Core
                                rsqrtGradTensorFlow.GenOps.Core
                                rsqrtGrad'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - R

                                randomCropTensorFlow.GenOps.Core
                                randomCrop'TensorFlow.GenOps.Core
                                randomGammaTensorFlow.GenOps.Core
                                randomGamma'TensorFlow.GenOps.Core
                                randomPoissonTensorFlow.GenOps.Core
                                randomPoisson'TensorFlow.GenOps.Core
                                randomShuffleTensorFlow.GenOps.Core
                                randomShuffle'TensorFlow.GenOps.Core
                                randomShuffleQueueTensorFlow.GenOps.Core
                                randomShuffleQueue'TensorFlow.GenOps.Core
                                randomShuffleQueueV2TensorFlow.GenOps.Core
                                randomShuffleQueueV2'TensorFlow.GenOps.Core
                                randomStandardNormalTensorFlow.GenOps.Core
                                randomStandardNormal'TensorFlow.GenOps.Core
                                randomUniformTensorFlow.GenOps.Core
                                randomUniform'TensorFlow.GenOps.Core
                                randomUniformIntTensorFlow.GenOps.Core
                                randomUniformInt'TensorFlow.GenOps.Core
                                rangeTensorFlow.GenOps.Core
                                range'TensorFlow.GenOps.Core
                                rangeDatasetTensorFlow.GenOps.Core
                                rangeDataset'TensorFlow.GenOps.Core
                                rankTensorFlow.GenOps.Core
                                rank'TensorFlow.GenOps.Core
                                readerNumRecordsProducedTensorFlow.GenOps.Core
                                readerNumRecordsProduced'TensorFlow.GenOps.Core
                                readerNumRecordsProducedV2TensorFlow.GenOps.Core
                                readerNumRecordsProducedV2'TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
                                readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
                                readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
                                readerReadTensorFlow.GenOps.Core
                                readerRead'TensorFlow.GenOps.Core
                                readerReadUpToTensorFlow.GenOps.Core
                                readerReadUpTo'TensorFlow.GenOps.Core
                                readerReadUpToV2TensorFlow.GenOps.Core
                                readerReadUpToV2'TensorFlow.GenOps.Core
                                readerReadV2TensorFlow.GenOps.Core
                                readerReadV2'TensorFlow.GenOps.Core
                                readerResetTensorFlow.GenOps.Core
                                readerReset'TensorFlow.GenOps.Core
                                readerResetV2TensorFlow.GenOps.Core
                                readerResetV2'TensorFlow.GenOps.Core
                                readerRestoreStateTensorFlow.GenOps.Core
                                readerRestoreState'TensorFlow.GenOps.Core
                                readerRestoreStateV2TensorFlow.GenOps.Core
                                readerRestoreStateV2'TensorFlow.GenOps.Core
                                readerSerializeStateTensorFlow.GenOps.Core
                                readerSerializeState'TensorFlow.GenOps.Core
                                readerSerializeStateV2TensorFlow.GenOps.Core
                                readerSerializeStateV2'TensorFlow.GenOps.Core
                                readFileTensorFlow.GenOps.Core
                                readFile'TensorFlow.GenOps.Core
                                readVariableOpTensorFlow.GenOps.Core
                                readVariableOp'TensorFlow.GenOps.Core
                                realTensorFlow.GenOps.Core
                                real'TensorFlow.GenOps.Core
                                realDivTensorFlow.GenOps.Core
                                realDiv'TensorFlow.GenOps.Core
                                reciprocalTensorFlow.GenOps.Core
                                reciprocal'TensorFlow.GenOps.Core
                                reciprocalGradTensorFlow.GenOps.Core
                                reciprocalGrad'TensorFlow.GenOps.Core
                                recordInputTensorFlow.GenOps.Core
                                recordInput'TensorFlow.GenOps.Core
                                reduceJoinTensorFlow.GenOps.Core
                                reduceJoin'TensorFlow.GenOps.Core
                                refEnterTensorFlow.GenOps.Core
                                refEnter'TensorFlow.GenOps.Core
                                refExitTensorFlow.GenOps.Core
                                refExit'TensorFlow.GenOps.Core
                                refIdentityTensorFlow.GenOps.Core
                                refIdentity'TensorFlow.GenOps.Core
                                refMergeTensorFlow.GenOps.Core
                                refMerge'TensorFlow.GenOps.Core
                                refNextIterationTensorFlow.GenOps.Core
                                refNextIteration'TensorFlow.GenOps.Core
                                refSelectTensorFlow.GenOps.Core
                                refSelect'TensorFlow.GenOps.Core
                                refSwitchTensorFlow.GenOps.Core
                                refSwitch'TensorFlow.GenOps.Core
                                reluTensorFlow.GenOps.Core
                                relu'TensorFlow.GenOps.Core
                                relu6TensorFlow.GenOps.Core
                                relu6'TensorFlow.GenOps.Core
                                relu6GradTensorFlow.GenOps.Core
                                relu6Grad'TensorFlow.GenOps.Core
                                reluGradTensorFlow.GenOps.Core
                                reluGrad'TensorFlow.GenOps.Core
                                remoteFusedGraphExecuteTensorFlow.GenOps.Core
                                remoteFusedGraphExecute'TensorFlow.GenOps.Core
                                repeatDatasetTensorFlow.GenOps.Core
                                repeatDataset'TensorFlow.GenOps.Core
                                requantizationRangeTensorFlow.GenOps.Core
                                requantizationRange'TensorFlow.GenOps.Core
                                requantizeTensorFlow.GenOps.Core
                                requantize'TensorFlow.GenOps.Core
                                reshapeTensorFlow.GenOps.Core
                                reshape'TensorFlow.GenOps.Core
                                resizeAreaTensorFlow.GenOps.Core
                                resizeArea'TensorFlow.GenOps.Core
                                resizeBicubicTensorFlow.GenOps.Core
                                resizeBicubic'TensorFlow.GenOps.Core
                                resizeBilinearTensorFlow.GenOps.Core
                                resizeBilinear'TensorFlow.GenOps.Core
                                resizeBilinearGradTensorFlow.GenOps.Core
                                resizeBilinearGrad'TensorFlow.GenOps.Core
                                resizeNearestNeighborTensorFlow.GenOps.Core
                                resizeNearestNeighbor'TensorFlow.GenOps.Core
                                resizeNearestNeighborGradTensorFlow.GenOps.Core
                                resizeNearestNeighborGrad'TensorFlow.GenOps.Core
                                resourceApplyAdadeltaTensorFlow.GenOps.Core
                                resourceApplyAdadelta'TensorFlow.GenOps.Core
                                resourceApplyAdagradTensorFlow.GenOps.Core
                                resourceApplyAdagrad'TensorFlow.GenOps.Core
                                resourceApplyAdagradDATensorFlow.GenOps.Core
                                resourceApplyAdagradDA'TensorFlow.GenOps.Core
                                resourceApplyAdamTensorFlow.GenOps.Core
                                resourceApplyAdam'TensorFlow.GenOps.Core
                                resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
                                resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                resourceApplyFtrlTensorFlow.GenOps.Core
                                resourceApplyFtrl'TensorFlow.GenOps.Core
                                resourceApplyFtrlV2TensorFlow.GenOps.Core
                                resourceApplyFtrlV2'TensorFlow.GenOps.Core
                                resourceApplyGradientDescentTensorFlow.GenOps.Core
                                resourceApplyGradientDescent'TensorFlow.GenOps.Core
                                resourceApplyMomentumTensorFlow.GenOps.Core
                                resourceApplyMomentum'TensorFlow.GenOps.Core
                                resourceApplyProximalAdagradTensorFlow.GenOps.Core
                                resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
                                resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
                                resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                resourceApplyRMSPropTensorFlow.GenOps.Core
                                resourceApplyRMSProp'TensorFlow.GenOps.Core
                                resourceGatherTensorFlow.GenOps.Core
                                resourceGather'TensorFlow.GenOps.Core
                                resourceScatterAddTensorFlow.GenOps.Core
                                resourceScatterAdd'TensorFlow.GenOps.Core
                                resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
                                resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
                                resourceSparseApplyAdagradTensorFlow.GenOps.Core
                                resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
                                resourceSparseApplyAdagradDATensorFlow.GenOps.Core
                                resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
                                resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
                                resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                resourceSparseApplyFtrlTensorFlow.GenOps.Core
                                resourceSparseApplyFtrl'TensorFlow.GenOps.Core
                                resourceSparseApplyFtrlV2TensorFlow.GenOps.Core
                                resourceSparseApplyFtrlV2'TensorFlow.GenOps.Core
                                resourceSparseApplyMomentumTensorFlow.GenOps.Core
                                resourceSparseApplyMomentum'TensorFlow.GenOps.Core
                                resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
                                resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
                                resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
                                resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                resourceSparseApplyRMSPropTensorFlow.GenOps.Core
                                resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
                                resourceStridedSliceAssignTensorFlow.GenOps.Core
                                resourceStridedSliceAssign'TensorFlow.GenOps.Core
                                restoreTensorFlow.GenOps.Core
                                restore'TensorFlow.GenOps.Core
                                restoreSliceTensorFlow.GenOps.Core
                                restoreSlice'TensorFlow.GenOps.Core
                                restoreV2TensorFlow.GenOps.Core
                                restoreV2'TensorFlow.GenOps.Core
                                reverseTensorFlow.GenOps.Core
                                reverse'TensorFlow.GenOps.Core
                                reverseSequenceTensorFlow.GenOps.Core
                                reverseSequence'TensorFlow.GenOps.Core
                                reverseV2TensorFlow.GenOps.Core
                                reverseV2'TensorFlow.GenOps.Core
                                rFFTTensorFlow.GenOps.Core
                                rFFT'TensorFlow.GenOps.Core
                                rFFT2DTensorFlow.GenOps.Core
                                rFFT2D'TensorFlow.GenOps.Core
                                rFFT3DTensorFlow.GenOps.Core
                                rFFT3D'TensorFlow.GenOps.Core
                                rGBToHSVTensorFlow.GenOps.Core
                                rGBToHSV'TensorFlow.GenOps.Core
                                rintTensorFlow.GenOps.Core
                                rint'TensorFlow.GenOps.Core
                                roundTensorFlow.GenOps.Core
                                round'TensorFlow.GenOps.Core
                                rsqrtTensorFlow.GenOps.Core
                                rsqrt'TensorFlow.GenOps.Core
                                rsqrtGradTensorFlow.GenOps.Core
                                rsqrtGrad'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html index 7771021..d85e24e 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - S)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - S

                                sampleDistortedBoundingBoxTensorFlow.GenOps.Core
                                sampleDistortedBoundingBox'TensorFlow.GenOps.Core
                                saveTensorFlow.GenOps.Core
                                save'TensorFlow.GenOps.Core
                                saveSlicesTensorFlow.GenOps.Core
                                saveSlices'TensorFlow.GenOps.Core
                                saveV2TensorFlow.GenOps.Core
                                saveV2'TensorFlow.GenOps.Core
                                scalarSummaryTensorFlow.GenOps.Core
                                scalarSummary'TensorFlow.GenOps.Core
                                scatterAddTensorFlow.GenOps.Core
                                scatterAdd'TensorFlow.GenOps.Core
                                scatterDivTensorFlow.GenOps.Core
                                scatterDiv'TensorFlow.GenOps.Core
                                scatterMulTensorFlow.GenOps.Core
                                scatterMul'TensorFlow.GenOps.Core
                                scatterNdTensorFlow.GenOps.Core
                                scatterNd'TensorFlow.GenOps.Core
                                scatterNdAddTensorFlow.GenOps.Core
                                scatterNdAdd'TensorFlow.GenOps.Core
                                scatterNdSubTensorFlow.GenOps.Core
                                scatterNdSub'TensorFlow.GenOps.Core
                                scatterNdUpdateTensorFlow.GenOps.Core
                                scatterNdUpdate'TensorFlow.GenOps.Core
                                scatterSubTensorFlow.GenOps.Core
                                scatterSub'TensorFlow.GenOps.Core
                                scatterUpdateTensorFlow.GenOps.Core
                                scatterUpdate'TensorFlow.GenOps.Core
                                sdcaFprintTensorFlow.GenOps.Core
                                sdcaFprint'TensorFlow.GenOps.Core
                                sdcaOptimizerTensorFlow.GenOps.Core
                                sdcaOptimizer'TensorFlow.GenOps.Core
                                sdcaShrinkL1TensorFlow.GenOps.Core
                                sdcaShrinkL1'TensorFlow.GenOps.Core
                                segmentMaxTensorFlow.GenOps.Core
                                segmentMax'TensorFlow.GenOps.Core
                                segmentMeanTensorFlow.GenOps.Core
                                segmentMean'TensorFlow.GenOps.Core
                                segmentMinTensorFlow.GenOps.Core
                                segmentMin'TensorFlow.GenOps.Core
                                segmentProdTensorFlow.GenOps.Core
                                segmentProd'TensorFlow.GenOps.Core
                                segmentSumTensorFlow.GenOps.Core
                                segmentSum'TensorFlow.GenOps.Core
                                selectTensorFlow.GenOps.Core
                                select'TensorFlow.GenOps.Core
                                selfAdjointEigTensorFlow.GenOps.Core
                                selfAdjointEig'TensorFlow.GenOps.Core
                                selfAdjointEigV2TensorFlow.GenOps.Core
                                selfAdjointEigV2'TensorFlow.GenOps.Core
                                serializeManySparseTensorFlow.GenOps.Core
                                serializeManySparse'TensorFlow.GenOps.Core
                                serializeSparseTensorFlow.GenOps.Core
                                serializeSparse'TensorFlow.GenOps.Core
                                setSizeTensorFlow.GenOps.Core
                                setSize'TensorFlow.GenOps.Core
                                shapeTensorFlow.GenOps.Core
                                shape'TensorFlow.GenOps.Core
                                shapeNTensorFlow.GenOps.Core
                                shapeN'TensorFlow.GenOps.Core
                                shardedFilenameTensorFlow.GenOps.Core
                                shardedFilename'TensorFlow.GenOps.Core
                                shardedFilespecTensorFlow.GenOps.Core
                                shardedFilespec'TensorFlow.GenOps.Core
                                sigmoidTensorFlow.GenOps.Core
                                sigmoid'TensorFlow.GenOps.Core
                                sigmoidGradTensorFlow.GenOps.Core
                                sigmoidGrad'TensorFlow.GenOps.Core
                                signTensorFlow.GenOps.Core
                                sign'TensorFlow.GenOps.Core
                                sinTensorFlow.GenOps.Core
                                sin'TensorFlow.GenOps.Core
                                sizeTensorFlow.GenOps.Core
                                size'TensorFlow.GenOps.Core
                                skipgramTensorFlow.GenOps.Core
                                skipgram'TensorFlow.GenOps.Core
                                sliceTensorFlow.GenOps.Core
                                slice'TensorFlow.GenOps.Core
                                softmaxTensorFlow.GenOps.Core
                                softmax'TensorFlow.GenOps.Core
                                softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
                                softmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
                                softplusTensorFlow.GenOps.Core
                                softplus'TensorFlow.GenOps.Core
                                softplusGradTensorFlow.GenOps.Core
                                softplusGrad'TensorFlow.GenOps.Core
                                softsignTensorFlow.GenOps.Core
                                softsign'TensorFlow.GenOps.Core
                                softsignGradTensorFlow.GenOps.Core
                                softsignGrad'TensorFlow.GenOps.Core
                                spaceToBatchTensorFlow.GenOps.Core
                                spaceToBatch'TensorFlow.GenOps.Core
                                spaceToBatchNDTensorFlow.GenOps.Core
                                spaceToBatchND'TensorFlow.GenOps.Core
                                spaceToDepthTensorFlow.GenOps.Core
                                spaceToDepth'TensorFlow.GenOps.Core
                                sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
                                sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
                                sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
                                sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
                                sparseAddTensorFlow.GenOps.Core
                                sparseAdd'TensorFlow.GenOps.Core
                                sparseAddGradTensorFlow.GenOps.Core
                                sparseAddGrad'TensorFlow.GenOps.Core
                                sparseApplyAdadeltaTensorFlow.GenOps.Core
                                sparseApplyAdadelta'TensorFlow.GenOps.Core
                                sparseApplyAdagradTensorFlow.GenOps.Core
                                sparseApplyAdagrad'TensorFlow.GenOps.Core
                                sparseApplyAdagradDATensorFlow.GenOps.Core
                                sparseApplyAdagradDA'TensorFlow.GenOps.Core
                                sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
                                sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                sparseApplyFtrlTensorFlow.GenOps.Core
                                sparseApplyFtrl'TensorFlow.GenOps.Core
                                sparseApplyMomentumTensorFlow.GenOps.Core
                                sparseApplyMomentum'TensorFlow.GenOps.Core
                                sparseApplyProximalAdagradTensorFlow.GenOps.Core
                                sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
                                sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
                                sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                sparseApplyRMSPropTensorFlow.GenOps.Core
                                sparseApplyRMSProp'TensorFlow.GenOps.Core
                                sparseConcatTensorFlow.GenOps.Core
                                sparseConcat'TensorFlow.GenOps.Core
                                sparseConditionalAccumulatorTensorFlow.GenOps.Core
                                sparseConditionalAccumulator'TensorFlow.GenOps.Core
                                sparseDenseCwiseAddTensorFlow.GenOps.Core
                                sparseDenseCwiseAdd'TensorFlow.GenOps.Core
                                sparseDenseCwiseDivTensorFlow.GenOps.Core
                                sparseDenseCwiseDiv'TensorFlow.GenOps.Core
                                sparseDenseCwiseMulTensorFlow.GenOps.Core
                                sparseDenseCwiseMul'TensorFlow.GenOps.Core
                                sparseMatMulTensorFlow.GenOps.Core
                                sparseMatMul'TensorFlow.GenOps.Core
                                sparseReduceSumTensorFlow.GenOps.Core
                                sparseReduceSum'TensorFlow.GenOps.Core
                                sparseReduceSumSparseTensorFlow.GenOps.Core
                                sparseReduceSumSparse'TensorFlow.GenOps.Core
                                sparseReorderTensorFlow.GenOps.Core
                                sparseReorder'TensorFlow.GenOps.Core
                                sparseReshapeTensorFlow.GenOps.Core
                                sparseReshape'TensorFlow.GenOps.Core
                                sparseSegmentMeanTensorFlow.GenOps.Core
                                sparseSegmentMean'TensorFlow.GenOps.Core
                                sparseSegmentMeanGradTensorFlow.GenOps.Core
                                sparseSegmentMeanGrad'TensorFlow.GenOps.Core
                                sparseSegmentSqrtNTensorFlow.GenOps.Core
                                sparseSegmentSqrtN'TensorFlow.GenOps.Core
                                sparseSegmentSqrtNGradTensorFlow.GenOps.Core
                                sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
                                sparseSegmentSumTensorFlow.GenOps.Core
                                sparseSegmentSum'TensorFlow.GenOps.Core
                                sparseSoftmaxTensorFlow.GenOps.Core
                                sparseSoftmax'TensorFlow.GenOps.Core
                                sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
                                sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
                                sparseSparseMaximumTensorFlow.GenOps.Core
                                sparseSparseMaximum'TensorFlow.GenOps.Core
                                sparseSparseMinimumTensorFlow.GenOps.Core
                                sparseSparseMinimum'TensorFlow.GenOps.Core
                                sparseSplitTensorFlow.GenOps.Core
                                sparseSplit'TensorFlow.GenOps.Core
                                sparseTensorDenseAddTensorFlow.GenOps.Core
                                sparseTensorDenseAdd'TensorFlow.GenOps.Core
                                sparseTensorDenseMatMulTensorFlow.GenOps.Core
                                sparseTensorDenseMatMul'TensorFlow.GenOps.Core
                                sparseToDenseTensorFlow.GenOps.Core
                                sparseToDense'TensorFlow.GenOps.Core
                                sparseToSparseSetOperationTensorFlow.GenOps.Core
                                sparseToSparseSetOperation'TensorFlow.GenOps.Core
                                splitTensorFlow.GenOps.Core
                                split'TensorFlow.GenOps.Core
                                splitVTensorFlow.GenOps.Core
                                splitV'TensorFlow.GenOps.Core
                                sqrtTensorFlow.GenOps.Core
                                sqrt'TensorFlow.GenOps.Core
                                sqrtGradTensorFlow.GenOps.Core
                                sqrtGrad'TensorFlow.GenOps.Core
                                squareTensorFlow.GenOps.Core
                                square'TensorFlow.GenOps.Core
                                squaredDifferenceTensorFlow.GenOps.Core
                                squaredDifference'TensorFlow.GenOps.Core
                                squeezeTensorFlow.GenOps.Core
                                squeeze'TensorFlow.GenOps.Core
                                stackTensorFlow.GenOps.Core
                                stack'TensorFlow.GenOps.Core
                                stackCloseTensorFlow.GenOps.Core
                                stackClose'TensorFlow.GenOps.Core
                                stackPopTensorFlow.GenOps.Core
                                stackPop'TensorFlow.GenOps.Core
                                stackPushTensorFlow.GenOps.Core
                                stackPush'TensorFlow.GenOps.Core
                                stageTensorFlow.GenOps.Core
                                stage'TensorFlow.GenOps.Core
                                stopGradientTensorFlow.GenOps.Core
                                stopGradient'TensorFlow.GenOps.Core
                                stridedSliceTensorFlow.GenOps.Core
                                stridedSlice'TensorFlow.GenOps.Core
                                stridedSliceAssignTensorFlow.GenOps.Core
                                stridedSliceAssign'TensorFlow.GenOps.Core
                                stridedSliceGradTensorFlow.GenOps.Core
                                stridedSliceGrad'TensorFlow.GenOps.Core
                                stringJoinTensorFlow.GenOps.Core
                                stringJoin'TensorFlow.GenOps.Core
                                stringSplitTensorFlow.GenOps.Core
                                stringSplit'TensorFlow.GenOps.Core
                                stringToHashBucketTensorFlow.GenOps.Core
                                stringToHashBucket'TensorFlow.GenOps.Core
                                stringToHashBucketFastTensorFlow.GenOps.Core
                                stringToHashBucketFast'TensorFlow.GenOps.Core
                                stringToHashBucketStrongTensorFlow.GenOps.Core
                                stringToHashBucketStrong'TensorFlow.GenOps.Core
                                stringToNumberTensorFlow.GenOps.Core
                                stringToNumber'TensorFlow.GenOps.Core
                                subTensorFlow.GenOps.Core
                                sub'TensorFlow.GenOps.Core
                                substrTensorFlow.GenOps.Core
                                substr'TensorFlow.GenOps.Core
                                sumTensorFlow.GenOps.Core
                                sum'TensorFlow.GenOps.Core
                                svdTensorFlow.GenOps.Core
                                svd'TensorFlow.GenOps.Core
                                switchTensorFlow.GenOps.Core
                                switch'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - S

                                sampleDistortedBoundingBoxTensorFlow.GenOps.Core
                                sampleDistortedBoundingBox'TensorFlow.GenOps.Core
                                sampleDistortedBoundingBoxV2TensorFlow.GenOps.Core
                                sampleDistortedBoundingBoxV2'TensorFlow.GenOps.Core
                                saveTensorFlow.GenOps.Core
                                save'TensorFlow.GenOps.Core
                                saveSlicesTensorFlow.GenOps.Core
                                saveSlices'TensorFlow.GenOps.Core
                                saveV2TensorFlow.GenOps.Core
                                saveV2'TensorFlow.GenOps.Core
                                scalarSummaryTensorFlow.GenOps.Core
                                scalarSummary'TensorFlow.GenOps.Core
                                scatterAddTensorFlow.GenOps.Core
                                scatterAdd'TensorFlow.GenOps.Core
                                scatterDivTensorFlow.GenOps.Core
                                scatterDiv'TensorFlow.GenOps.Core
                                scatterMulTensorFlow.GenOps.Core
                                scatterMul'TensorFlow.GenOps.Core
                                scatterNdTensorFlow.GenOps.Core
                                scatterNd'TensorFlow.GenOps.Core
                                scatterNdAddTensorFlow.GenOps.Core
                                scatterNdAdd'TensorFlow.GenOps.Core
                                scatterNdNonAliasingAddTensorFlow.GenOps.Core
                                scatterNdNonAliasingAdd'TensorFlow.GenOps.Core
                                scatterNdSubTensorFlow.GenOps.Core
                                scatterNdSub'TensorFlow.GenOps.Core
                                scatterNdUpdateTensorFlow.GenOps.Core
                                scatterNdUpdate'TensorFlow.GenOps.Core
                                scatterSubTensorFlow.GenOps.Core
                                scatterSub'TensorFlow.GenOps.Core
                                scatterUpdateTensorFlow.GenOps.Core
                                scatterUpdate'TensorFlow.GenOps.Core
                                sdcaFprintTensorFlow.GenOps.Core
                                sdcaFprint'TensorFlow.GenOps.Core
                                sdcaOptimizerTensorFlow.GenOps.Core
                                sdcaOptimizer'TensorFlow.GenOps.Core
                                sdcaShrinkL1TensorFlow.GenOps.Core
                                sdcaShrinkL1'TensorFlow.GenOps.Core
                                segmentMaxTensorFlow.GenOps.Core
                                segmentMax'TensorFlow.GenOps.Core
                                segmentMeanTensorFlow.GenOps.Core
                                segmentMean'TensorFlow.GenOps.Core
                                segmentMinTensorFlow.GenOps.Core
                                segmentMin'TensorFlow.GenOps.Core
                                segmentProdTensorFlow.GenOps.Core
                                segmentProd'TensorFlow.GenOps.Core
                                segmentSumTensorFlow.GenOps.Core
                                segmentSum'TensorFlow.GenOps.Core
                                selectTensorFlow.GenOps.Core
                                select'TensorFlow.GenOps.Core
                                selfAdjointEigTensorFlow.GenOps.Core
                                selfAdjointEig'TensorFlow.GenOps.Core
                                selfAdjointEigV2TensorFlow.GenOps.Core
                                selfAdjointEigV2'TensorFlow.GenOps.Core
                                serializeManySparseTensorFlow.GenOps.Core
                                serializeManySparse'TensorFlow.GenOps.Core
                                serializeSparseTensorFlow.GenOps.Core
                                serializeSparse'TensorFlow.GenOps.Core
                                setSizeTensorFlow.GenOps.Core
                                setSize'TensorFlow.GenOps.Core
                                shapeTensorFlow.GenOps.Core
                                shape'TensorFlow.GenOps.Core
                                shapeNTensorFlow.GenOps.Core
                                shapeN'TensorFlow.GenOps.Core
                                shardedFilenameTensorFlow.GenOps.Core
                                shardedFilename'TensorFlow.GenOps.Core
                                shardedFilespecTensorFlow.GenOps.Core
                                shardedFilespec'TensorFlow.GenOps.Core
                                shuffleDatasetTensorFlow.GenOps.Core
                                shuffleDataset'TensorFlow.GenOps.Core
                                sigmoidTensorFlow.GenOps.Core
                                sigmoid'TensorFlow.GenOps.Core
                                sigmoidGradTensorFlow.GenOps.Core
                                sigmoidGrad'TensorFlow.GenOps.Core
                                signTensorFlow.GenOps.Core
                                sign'TensorFlow.GenOps.Core
                                sinTensorFlow.GenOps.Core
                                sin'TensorFlow.GenOps.Core
                                sinhTensorFlow.GenOps.Core
                                sinh'TensorFlow.GenOps.Core
                                sizeTensorFlow.GenOps.Core
                                size'TensorFlow.GenOps.Core
                                skipDatasetTensorFlow.GenOps.Core
                                skipDataset'TensorFlow.GenOps.Core
                                skipgramTensorFlow.GenOps.Core
                                skipgram'TensorFlow.GenOps.Core
                                sliceTensorFlow.GenOps.Core
                                slice'TensorFlow.GenOps.Core
                                softmaxTensorFlow.GenOps.Core
                                softmax'TensorFlow.GenOps.Core
                                softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
                                softmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
                                softplusTensorFlow.GenOps.Core
                                softplus'TensorFlow.GenOps.Core
                                softplusGradTensorFlow.GenOps.Core
                                softplusGrad'TensorFlow.GenOps.Core
                                softsignTensorFlow.GenOps.Core
                                softsign'TensorFlow.GenOps.Core
                                softsignGradTensorFlow.GenOps.Core
                                softsignGrad'TensorFlow.GenOps.Core
                                spaceToBatchTensorFlow.GenOps.Core
                                spaceToBatch'TensorFlow.GenOps.Core
                                spaceToBatchNDTensorFlow.GenOps.Core
                                spaceToBatchND'TensorFlow.GenOps.Core
                                spaceToDepthTensorFlow.GenOps.Core
                                spaceToDepth'TensorFlow.GenOps.Core
                                sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
                                sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
                                sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
                                sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
                                sparseAddTensorFlow.GenOps.Core
                                sparseAdd'TensorFlow.GenOps.Core
                                sparseAddGradTensorFlow.GenOps.Core
                                sparseAddGrad'TensorFlow.GenOps.Core
                                sparseApplyAdadeltaTensorFlow.GenOps.Core
                                sparseApplyAdadelta'TensorFlow.GenOps.Core
                                sparseApplyAdagradTensorFlow.GenOps.Core
                                sparseApplyAdagrad'TensorFlow.GenOps.Core
                                sparseApplyAdagradDATensorFlow.GenOps.Core
                                sparseApplyAdagradDA'TensorFlow.GenOps.Core
                                sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
                                sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
                                sparseApplyFtrlTensorFlow.GenOps.Core
                                sparseApplyFtrl'TensorFlow.GenOps.Core
                                sparseApplyFtrlV2TensorFlow.GenOps.Core
                                sparseApplyFtrlV2'TensorFlow.GenOps.Core
                                sparseApplyMomentumTensorFlow.GenOps.Core
                                sparseApplyMomentum'TensorFlow.GenOps.Core
                                sparseApplyProximalAdagradTensorFlow.GenOps.Core
                                sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
                                sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
                                sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
                                sparseApplyRMSPropTensorFlow.GenOps.Core
                                sparseApplyRMSProp'TensorFlow.GenOps.Core
                                sparseConcatTensorFlow.GenOps.Core
                                sparseConcat'TensorFlow.GenOps.Core
                                sparseConditionalAccumulatorTensorFlow.GenOps.Core
                                sparseConditionalAccumulator'TensorFlow.GenOps.Core
                                sparseCrossTensorFlow.GenOps.Core
                                sparseCross'TensorFlow.GenOps.Core
                                sparseDenseCwiseAddTensorFlow.GenOps.Core
                                sparseDenseCwiseAdd'TensorFlow.GenOps.Core
                                sparseDenseCwiseDivTensorFlow.GenOps.Core
                                sparseDenseCwiseDiv'TensorFlow.GenOps.Core
                                sparseDenseCwiseMulTensorFlow.GenOps.Core
                                sparseDenseCwiseMul'TensorFlow.GenOps.Core
                                sparseFillEmptyRowsTensorFlow.GenOps.Core
                                sparseFillEmptyRows'TensorFlow.GenOps.Core
                                sparseFillEmptyRowsGradTensorFlow.GenOps.Core
                                sparseFillEmptyRowsGrad'TensorFlow.GenOps.Core
                                sparseMatMulTensorFlow.GenOps.Core
                                sparseMatMul'TensorFlow.GenOps.Core
                                sparseReduceMaxTensorFlow.GenOps.Core
                                sparseReduceMax'TensorFlow.GenOps.Core
                                sparseReduceMaxSparseTensorFlow.GenOps.Core
                                sparseReduceMaxSparse'TensorFlow.GenOps.Core
                                sparseReduceSumTensorFlow.GenOps.Core
                                sparseReduceSum'TensorFlow.GenOps.Core
                                sparseReduceSumSparseTensorFlow.GenOps.Core
                                sparseReduceSumSparse'TensorFlow.GenOps.Core
                                sparseReorderTensorFlow.GenOps.Core
                                sparseReorder'TensorFlow.GenOps.Core
                                sparseReshapeTensorFlow.GenOps.Core
                                sparseReshape'TensorFlow.GenOps.Core
                                sparseSegmentMeanTensorFlow.GenOps.Core
                                sparseSegmentMean'TensorFlow.GenOps.Core
                                sparseSegmentMeanGradTensorFlow.GenOps.Core
                                sparseSegmentMeanGrad'TensorFlow.GenOps.Core
                                sparseSegmentSqrtNTensorFlow.GenOps.Core
                                sparseSegmentSqrtN'TensorFlow.GenOps.Core
                                sparseSegmentSqrtNGradTensorFlow.GenOps.Core
                                sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
                                sparseSegmentSumTensorFlow.GenOps.Core
                                sparseSegmentSum'TensorFlow.GenOps.Core
                                sparseSliceTensorFlow.GenOps.Core
                                sparseSlice'TensorFlow.GenOps.Core
                                sparseSoftmaxTensorFlow.GenOps.Core
                                sparseSoftmax'TensorFlow.GenOps.Core
                                sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
                                sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
                                sparseSparseMaximumTensorFlow.GenOps.Core
                                sparseSparseMaximum'TensorFlow.GenOps.Core
                                sparseSparseMinimumTensorFlow.GenOps.Core
                                sparseSparseMinimum'TensorFlow.GenOps.Core
                                sparseSplitTensorFlow.GenOps.Core
                                sparseSplit'TensorFlow.GenOps.Core
                                sparseTensorDenseAddTensorFlow.GenOps.Core
                                sparseTensorDenseAdd'TensorFlow.GenOps.Core
                                sparseTensorDenseMatMulTensorFlow.GenOps.Core
                                sparseTensorDenseMatMul'TensorFlow.GenOps.Core
                                sparseTensorSliceDatasetTensorFlow.GenOps.Core
                                sparseTensorSliceDataset'TensorFlow.GenOps.Core
                                sparseToDenseTensorFlow.GenOps.Core
                                sparseToDense'TensorFlow.GenOps.Core
                                sparseToSparseSetOperationTensorFlow.GenOps.Core
                                sparseToSparseSetOperation'TensorFlow.GenOps.Core
                                splitTensorFlow.GenOps.Core
                                split'TensorFlow.GenOps.Core
                                splitVTensorFlow.GenOps.Core
                                splitV'TensorFlow.GenOps.Core
                                sqrtTensorFlow.GenOps.Core
                                sqrt'TensorFlow.GenOps.Core
                                sqrtGradTensorFlow.GenOps.Core
                                sqrtGrad'TensorFlow.GenOps.Core
                                squareTensorFlow.GenOps.Core
                                square'TensorFlow.GenOps.Core
                                squaredDifferenceTensorFlow.GenOps.Core
                                squaredDifference'TensorFlow.GenOps.Core
                                squeezeTensorFlow.GenOps.Core
                                squeeze'TensorFlow.GenOps.Core
                                stackTensorFlow.GenOps.Core
                                stack'TensorFlow.GenOps.Core
                                stackCloseTensorFlow.GenOps.Core
                                stackClose'TensorFlow.GenOps.Core
                                stackCloseV2TensorFlow.GenOps.Core
                                stackCloseV2'TensorFlow.GenOps.Core
                                stackPopTensorFlow.GenOps.Core
                                stackPop'TensorFlow.GenOps.Core
                                stackPopV2TensorFlow.GenOps.Core
                                stackPopV2'TensorFlow.GenOps.Core
                                stackPushTensorFlow.GenOps.Core
                                stackPush'TensorFlow.GenOps.Core
                                stackPushV2TensorFlow.GenOps.Core
                                stackPushV2'TensorFlow.GenOps.Core
                                stackV2TensorFlow.GenOps.Core
                                stackV2'TensorFlow.GenOps.Core
                                stageTensorFlow.GenOps.Core
                                stage'TensorFlow.GenOps.Core
                                stageClearTensorFlow.GenOps.Core
                                stageClear'TensorFlow.GenOps.Core
                                stagePeekTensorFlow.GenOps.Core
                                stagePeek'TensorFlow.GenOps.Core
                                stageSizeTensorFlow.GenOps.Core
                                stageSize'TensorFlow.GenOps.Core
                                statelessRandomNormalTensorFlow.GenOps.Core
                                statelessRandomNormal'TensorFlow.GenOps.Core
                                statelessRandomUniformTensorFlow.GenOps.Core
                                statelessRandomUniform'TensorFlow.GenOps.Core
                                statelessTruncatedNormalTensorFlow.GenOps.Core
                                statelessTruncatedNormal'TensorFlow.GenOps.Core
                                stopGradientTensorFlow.GenOps.Core
                                stopGradient'TensorFlow.GenOps.Core
                                stridedSliceTensorFlow.GenOps.Core
                                stridedSlice'TensorFlow.GenOps.Core
                                stridedSliceAssignTensorFlow.GenOps.Core
                                stridedSliceAssign'TensorFlow.GenOps.Core
                                stridedSliceGradTensorFlow.GenOps.Core
                                stridedSliceGrad'TensorFlow.GenOps.Core
                                stringJoinTensorFlow.GenOps.Core
                                stringJoin'TensorFlow.GenOps.Core
                                stringSplitTensorFlow.GenOps.Core
                                stringSplit'TensorFlow.GenOps.Core
                                stringToHashBucketTensorFlow.GenOps.Core
                                stringToHashBucket'TensorFlow.GenOps.Core
                                stringToHashBucketFastTensorFlow.GenOps.Core
                                stringToHashBucketFast'TensorFlow.GenOps.Core
                                stringToHashBucketStrongTensorFlow.GenOps.Core
                                stringToHashBucketStrong'TensorFlow.GenOps.Core
                                stringToNumberTensorFlow.GenOps.Core
                                stringToNumber'TensorFlow.GenOps.Core
                                subTensorFlow.GenOps.Core
                                sub'TensorFlow.GenOps.Core
                                substrTensorFlow.GenOps.Core
                                substr'TensorFlow.GenOps.Core
                                sumTensorFlow.GenOps.Core
                                sum'TensorFlow.GenOps.Core
                                svdTensorFlow.GenOps.Core
                                svd'TensorFlow.GenOps.Core
                                switchTensorFlow.GenOps.Core
                                switch'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html index 0150437..31d88be 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - T)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - T

                                takeManySparseFromTensorsMapTensorFlow.GenOps.Core
                                takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
                                tanTensorFlow.GenOps.Core
                                tan'TensorFlow.GenOps.Core
                                tanhTensorFlow.GenOps.Core
                                tanh'TensorFlow.GenOps.Core
                                tanhGradTensorFlow.GenOps.Core
                                tanhGrad'TensorFlow.GenOps.Core
                                temporaryVariableTensorFlow.GenOps.Core
                                temporaryVariable'TensorFlow.GenOps.Core
                                tensorArrayTensorFlow.GenOps.Core
                                tensorArray'TensorFlow.GenOps.Core
                                tensorArrayCloseTensorFlow.GenOps.Core
                                tensorArrayClose'TensorFlow.GenOps.Core
                                tensorArrayCloseV2TensorFlow.GenOps.Core
                                tensorArrayCloseV2'TensorFlow.GenOps.Core
                                tensorArrayCloseV3TensorFlow.GenOps.Core
                                tensorArrayCloseV3'TensorFlow.GenOps.Core
                                tensorArrayConcatTensorFlow.GenOps.Core
                                tensorArrayConcat'TensorFlow.GenOps.Core
                                tensorArrayConcatV2TensorFlow.GenOps.Core
                                tensorArrayConcatV2'TensorFlow.GenOps.Core
                                tensorArrayConcatV3TensorFlow.GenOps.Core
                                tensorArrayConcatV3'TensorFlow.GenOps.Core
                                tensorArrayGatherTensorFlow.GenOps.Core
                                tensorArrayGather'TensorFlow.GenOps.Core
                                tensorArrayGatherV2TensorFlow.GenOps.Core
                                tensorArrayGatherV2'TensorFlow.GenOps.Core
                                tensorArrayGatherV3TensorFlow.GenOps.Core
                                tensorArrayGatherV3'TensorFlow.GenOps.Core
                                tensorArrayGradTensorFlow.GenOps.Core
                                tensorArrayGrad'TensorFlow.GenOps.Core
                                tensorArrayGradV2TensorFlow.GenOps.Core
                                tensorArrayGradV2'TensorFlow.GenOps.Core
                                tensorArrayGradV3TensorFlow.GenOps.Core
                                tensorArrayGradV3'TensorFlow.GenOps.Core
                                tensorArrayPackTensorFlow.GenOps.Core
                                tensorArrayPack'TensorFlow.GenOps.Core
                                tensorArrayReadTensorFlow.GenOps.Core
                                tensorArrayRead'TensorFlow.GenOps.Core
                                tensorArrayReadV2TensorFlow.GenOps.Core
                                tensorArrayReadV2'TensorFlow.GenOps.Core
                                tensorArrayReadV3TensorFlow.GenOps.Core
                                tensorArrayReadV3'TensorFlow.GenOps.Core
                                tensorArrayScatterTensorFlow.GenOps.Core
                                tensorArrayScatter'TensorFlow.GenOps.Core
                                tensorArrayScatterV2TensorFlow.GenOps.Core
                                tensorArrayScatterV2'TensorFlow.GenOps.Core
                                tensorArrayScatterV3TensorFlow.GenOps.Core
                                tensorArrayScatterV3'TensorFlow.GenOps.Core
                                tensorArraySizeTensorFlow.GenOps.Core
                                tensorArraySize'TensorFlow.GenOps.Core
                                tensorArraySizeV2TensorFlow.GenOps.Core
                                tensorArraySizeV2'TensorFlow.GenOps.Core
                                tensorArraySizeV3TensorFlow.GenOps.Core
                                tensorArraySizeV3'TensorFlow.GenOps.Core
                                tensorArraySplitTensorFlow.GenOps.Core
                                tensorArraySplit'TensorFlow.GenOps.Core
                                tensorArraySplitV2TensorFlow.GenOps.Core
                                tensorArraySplitV2'TensorFlow.GenOps.Core
                                tensorArraySplitV3TensorFlow.GenOps.Core
                                tensorArraySplitV3'TensorFlow.GenOps.Core
                                tensorArrayUnpackTensorFlow.GenOps.Core
                                tensorArrayUnpack'TensorFlow.GenOps.Core
                                tensorArrayV2TensorFlow.GenOps.Core
                                tensorArrayV2'TensorFlow.GenOps.Core
                                tensorArrayV3TensorFlow.GenOps.Core
                                tensorArrayV3'TensorFlow.GenOps.Core
                                tensorArrayWriteTensorFlow.GenOps.Core
                                tensorArrayWrite'TensorFlow.GenOps.Core
                                tensorArrayWriteV2TensorFlow.GenOps.Core
                                tensorArrayWriteV2'TensorFlow.GenOps.Core
                                tensorArrayWriteV3TensorFlow.GenOps.Core
                                tensorArrayWriteV3'TensorFlow.GenOps.Core
                                tensorSummaryTensorFlow.GenOps.Core
                                tensorSummary'TensorFlow.GenOps.Core
                                textLineReaderTensorFlow.GenOps.Core
                                textLineReader'TensorFlow.GenOps.Core
                                textLineReaderV2TensorFlow.GenOps.Core
                                textLineReaderV2'TensorFlow.GenOps.Core
                                tFRecordReaderTensorFlow.GenOps.Core
                                tFRecordReader'TensorFlow.GenOps.Core
                                tFRecordReaderV2TensorFlow.GenOps.Core
                                tFRecordReaderV2'TensorFlow.GenOps.Core
                                threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
                                threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
                                tileTensorFlow.GenOps.Core
                                tile'TensorFlow.GenOps.Core
                                tileGradTensorFlow.GenOps.Core
                                tileGrad'TensorFlow.GenOps.Core
                                topKTensorFlow.GenOps.Core
                                topK'TensorFlow.GenOps.Core
                                topKV2TensorFlow.GenOps.Core
                                topKV2'TensorFlow.GenOps.Core
                                transposeTensorFlow.GenOps.Core
                                transpose'TensorFlow.GenOps.Core
                                truncateDivTensorFlow.GenOps.Core
                                truncateDiv'TensorFlow.GenOps.Core
                                truncatedNormalTensorFlow.GenOps.Core
                                truncatedNormal'TensorFlow.GenOps.Core
                                truncateModTensorFlow.GenOps.Core
                                truncateMod'TensorFlow.GenOps.Core
                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Index - T

                                takeDatasetTensorFlow.GenOps.Core
                                takeDataset'TensorFlow.GenOps.Core
                                takeManySparseFromTensorsMapTensorFlow.GenOps.Core
                                takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
                                tanTensorFlow.GenOps.Core
                                tan'TensorFlow.GenOps.Core
                                tanhTensorFlow.GenOps.Core
                                tanh'TensorFlow.GenOps.Core
                                tanhGradTensorFlow.GenOps.Core
                                tanhGrad'TensorFlow.GenOps.Core
                                temporaryVariableTensorFlow.GenOps.Core
                                temporaryVariable'TensorFlow.GenOps.Core
                                tensorArrayTensorFlow.GenOps.Core
                                tensorArray'TensorFlow.GenOps.Core
                                tensorArrayCloseTensorFlow.GenOps.Core
                                tensorArrayClose'TensorFlow.GenOps.Core
                                tensorArrayCloseV2TensorFlow.GenOps.Core
                                tensorArrayCloseV2'TensorFlow.GenOps.Core
                                tensorArrayCloseV3TensorFlow.GenOps.Core
                                tensorArrayCloseV3'TensorFlow.GenOps.Core
                                tensorArrayConcatTensorFlow.GenOps.Core
                                tensorArrayConcat'TensorFlow.GenOps.Core
                                tensorArrayConcatV2TensorFlow.GenOps.Core
                                tensorArrayConcatV2'TensorFlow.GenOps.Core
                                tensorArrayConcatV3TensorFlow.GenOps.Core
                                tensorArrayConcatV3'TensorFlow.GenOps.Core
                                tensorArrayGatherTensorFlow.GenOps.Core
                                tensorArrayGather'TensorFlow.GenOps.Core
                                tensorArrayGatherV2TensorFlow.GenOps.Core
                                tensorArrayGatherV2'TensorFlow.GenOps.Core
                                tensorArrayGatherV3TensorFlow.GenOps.Core
                                tensorArrayGatherV3'TensorFlow.GenOps.Core
                                tensorArrayGradTensorFlow.GenOps.Core
                                tensorArrayGrad'TensorFlow.GenOps.Core
                                tensorArrayGradV2TensorFlow.GenOps.Core
                                tensorArrayGradV2'TensorFlow.GenOps.Core
                                tensorArrayGradV3TensorFlow.GenOps.Core
                                tensorArrayGradV3'TensorFlow.GenOps.Core
                                tensorArrayPackTensorFlow.GenOps.Core
                                tensorArrayPack'TensorFlow.GenOps.Core
                                tensorArrayReadTensorFlow.GenOps.Core
                                tensorArrayRead'TensorFlow.GenOps.Core
                                tensorArrayReadV2TensorFlow.GenOps.Core
                                tensorArrayReadV2'TensorFlow.GenOps.Core
                                tensorArrayReadV3TensorFlow.GenOps.Core
                                tensorArrayReadV3'TensorFlow.GenOps.Core
                                tensorArrayScatterTensorFlow.GenOps.Core
                                tensorArrayScatter'TensorFlow.GenOps.Core
                                tensorArrayScatterV2TensorFlow.GenOps.Core
                                tensorArrayScatterV2'TensorFlow.GenOps.Core
                                tensorArrayScatterV3TensorFlow.GenOps.Core
                                tensorArrayScatterV3'TensorFlow.GenOps.Core
                                tensorArraySizeTensorFlow.GenOps.Core
                                tensorArraySize'TensorFlow.GenOps.Core
                                tensorArraySizeV2TensorFlow.GenOps.Core
                                tensorArraySizeV2'TensorFlow.GenOps.Core
                                tensorArraySizeV3TensorFlow.GenOps.Core
                                tensorArraySizeV3'TensorFlow.GenOps.Core
                                tensorArraySplitTensorFlow.GenOps.Core
                                tensorArraySplit'TensorFlow.GenOps.Core
                                tensorArraySplitV2TensorFlow.GenOps.Core
                                tensorArraySplitV2'TensorFlow.GenOps.Core
                                tensorArraySplitV3TensorFlow.GenOps.Core
                                tensorArraySplitV3'TensorFlow.GenOps.Core
                                tensorArrayUnpackTensorFlow.GenOps.Core
                                tensorArrayUnpack'TensorFlow.GenOps.Core
                                tensorArrayV2TensorFlow.GenOps.Core
                                tensorArrayV2'TensorFlow.GenOps.Core
                                tensorArrayV3TensorFlow.GenOps.Core
                                tensorArrayV3'TensorFlow.GenOps.Core
                                tensorArrayWriteTensorFlow.GenOps.Core
                                tensorArrayWrite'TensorFlow.GenOps.Core
                                tensorArrayWriteV2TensorFlow.GenOps.Core
                                tensorArrayWriteV2'TensorFlow.GenOps.Core
                                tensorArrayWriteV3TensorFlow.GenOps.Core
                                tensorArrayWriteV3'TensorFlow.GenOps.Core
                                tensorDatasetTensorFlow.GenOps.Core
                                tensorDataset'TensorFlow.GenOps.Core
                                tensorSliceDatasetTensorFlow.GenOps.Core
                                tensorSliceDataset'TensorFlow.GenOps.Core
                                tensorSummaryTensorFlow.GenOps.Core
                                tensorSummary'TensorFlow.GenOps.Core
                                tensorSummaryV2TensorFlow.GenOps.Core
                                tensorSummaryV2'TensorFlow.GenOps.Core
                                textLineDatasetTensorFlow.GenOps.Core
                                textLineDataset'TensorFlow.GenOps.Core
                                textLineReaderTensorFlow.GenOps.Core
                                textLineReader'TensorFlow.GenOps.Core
                                textLineReaderV2TensorFlow.GenOps.Core
                                textLineReaderV2'TensorFlow.GenOps.Core
                                tFRecordDatasetTensorFlow.GenOps.Core
                                tFRecordDataset'TensorFlow.GenOps.Core
                                tFRecordReaderTensorFlow.GenOps.Core
                                tFRecordReader'TensorFlow.GenOps.Core
                                tFRecordReaderV2TensorFlow.GenOps.Core
                                tFRecordReaderV2'TensorFlow.GenOps.Core
                                threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
                                threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
                                tileTensorFlow.GenOps.Core
                                tile'TensorFlow.GenOps.Core
                                tileGradTensorFlow.GenOps.Core
                                tileGrad'TensorFlow.GenOps.Core
                                topKTensorFlow.GenOps.Core
                                topK'TensorFlow.GenOps.Core
                                topKV2TensorFlow.GenOps.Core
                                topKV2'TensorFlow.GenOps.Core
                                transposeTensorFlow.GenOps.Core
                                transpose'TensorFlow.GenOps.Core
                                truncateDivTensorFlow.GenOps.Core
                                truncateDiv'TensorFlow.GenOps.Core
                                truncatedNormalTensorFlow.GenOps.Core
                                truncatedNormal'TensorFlow.GenOps.Core
                                truncateModTensorFlow.GenOps.Core
                                truncateMod'TensorFlow.GenOps.Core
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html index 169f2b8..163d1c4 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - U)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html index 4c456ab..1eb42b7 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - V)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html index a57b2b9..d770d6b 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - W)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html index 72fe0ad..f3c63c0 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - Z)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index.html index f2397b4..8be9ec8 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index)

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/frames.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-core-ops-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/index-frames.html deleted file mode 100644 index 60b888e..0000000 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. 
\ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html index 06c6208..1fd805d 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Code generated signatures for the Ops in libtensorflow.

                                Modules

                                \ No newline at end of file +

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

                                Code generated signatures for the Ops in libtensorflow.

                                Modules

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html index 677b05f..d2df300 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html @@ -1,4 +1,4 @@ -TensorFlow.GenOps.Core

                                TensorFlow.GenOps.Core

                                \ No newline at end of file +

                                TensorFlow.GenOps.Core

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/ocean.css b/docs/haddock/tensorflow-core-ops-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ 
-424,30 +435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow.GenOps.Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow.GenOps.Core.html new file mode 100644 index 0000000..1e7a500 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow.GenOps.Core.html @@ -0,0 +1,48997 @@ +
                                {-# LANGUAGE ConstraintKinds #-}
                                +{-# LANGUAGE DataKinds #-}
                                +{-# LANGUAGE FlexibleContexts #-}
                                +{-# LANGUAGE FlexibleInstances #-}
                                +{-# LANGUAGE OverloadedStrings #-}
                                +{-# LANGUAGE ScopedTypeVariables #-}
                                +{-# OPTIONS_GHC -fno-warn-name-shadowing #-}
                                +{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}
                                +module TensorFlow.GenOps.Core where
                                +
                                +import Data.ByteString (ByteString)
                                +import Data.Complex (Complex)
                                +import Data.Int (Int8, Int16, Int32, Int64)
                                +import Data.Proxy (Proxy(Proxy))
                                +import Data.Word (Word8, Word16)
                                +import Lens.Family2 ((.~), (&))
                                +import TensorFlow.Build
                                +import TensorFlow.BuildOp
                                +import TensorFlow.Tensor
                                +import TensorFlow.Types
                                +
                                +-- | Raise a exception to abort the process when called.
                                +--
                                +-- If exit_without_error is true, the process will exit normally,
                                +-- otherwise it will exit with a SIGABORT signal.
                                +-- 
                                +-- Returns nothing but an exception.
                                +abort :: forall m' . (MonadBuild m') => 
                                +         m' (ControlNode)
                                +abort = abort' id
                                +abort' :: forall m' . (MonadBuild m') => OpParams ->
                                +          m' (ControlNode)
                                +abort' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "Abort"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +attr {
                                +  name: "error_msg"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "A string which is the message associated with the exception."
                                +}
                                +attr {
                                +  name: "exit_without_error" type: "bool" default_value { b: false }
                                +}
                                +-}
                                +
                                +-- | Computes the absolute value of a tensor.
                                +--
                                +-- Given a tensor `x`, this operation returns a tensor containing the absolute
                                +-- value of each element in `x`. For example, if x is an input element and y is
                                +-- an output element, this operation computes \\(y = |x|\\).
                                +abs :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16,
                                +                               Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +abs = abs' id
                                +abs' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                Data.Word.Word16, Double, Float] t) =>
                                +        OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +abs' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Abs"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Applies a gradient to a given accumulator.
                                +--
                                +-- Does not add if local_step is lesser than the accumulator's global_step.
                                +accumulatorApplyGradient :: forall v'2 v'3 dtype m' . (MonadBuild m',
                                +                                                       OneOf '[(Data.Complex.Complex Double),
                                +                                                               (Data.Complex.Complex Float),
                                +                                                               Data.Int.Int16,
                                +                                                               Data.Int.Int32,
                                +                                                               Data.Int.Int64,
                                +                                                               Data.Int.Int8,
                                +                                                               Data.Word.Word16,
                                +                                                               Data.Word.Word8,
                                +                                                               Double,
                                +                                                               Float] dtype) => 
                                +                            Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a accumulator.
                                +                            -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the gradient was computed.
                                +                            -> Tensor v'3 dtype -- ^ __gradient__: A tensor of the gradient to be accumulated.
                                +                            -> m' (ControlNode)
                                +accumulatorApplyGradient = accumulatorApplyGradient' id
                                +accumulatorApplyGradient' :: forall v'2 v'3 dtype m' . (MonadBuild m',
                                +                                                        OneOf '[(Data.Complex.Complex Double),
                                +                                                                (Data.Complex.Complex Float),
                                +                                                                Data.Int.Int16,
                                +                                                                Data.Int.Int32,
                                +                                                                Data.Int.Int64,
                                +                                                                Data.Int.Int8,
                                +                                                                Data.Word.Word16,
                                +                                                                Data.Word.Word8,
                                +                                                                Double,
                                +                                                                Float] dtype) =>
                                +                             OpParams ->
                                +                             Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a accumulator.
                                +                             -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the gradient was computed.
                                +                             -> Tensor v'3 dtype -- ^ __gradient__: A tensor of the gradient to be accumulated.
                                +                             -> m' (ControlNode)
                                +accumulatorApplyGradient' op'options handle local_step
                                +                          gradient | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs local_step,
                                +                                                             buildInputs gradient]
                                +        buildOp [] (opDef "AccumulatorApplyGradient"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a accumulator."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "local_step"
                                +  description: "The local_step value at which the gradient was computed."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "gradient"
                                +  description: "A tensor of the gradient to be accumulated."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the number of gradients aggregated in the given accumulators.
                                +
                                +accumulatorNumAccumulated :: forall m' . (MonadBuild m') => 
                                +                             Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
                                +                             -> m' (Tensor Value Data.Int.Int32) -- ^ __num_accumulated__: The number of gradients aggregated in the given accumulator.
                                +accumulatorNumAccumulated = accumulatorNumAccumulated' id
                                +accumulatorNumAccumulated' :: forall m' . (MonadBuild m') => OpParams ->
                                +                              Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
                                +                              -> m' (Tensor Value Data.Int.Int32) -- ^ __num_accumulated__: The number of gradients aggregated in the given accumulator.
                                +accumulatorNumAccumulated' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "AccumulatorNumAccumulated"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to an accumulator."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "num_accumulated"
                                +  description: "The number of gradients aggregated in the given accumulator."
                                +  type: DT_INT32
                                +}
                                +-}
                                +
                                +-- | Updates the accumulator with a new value for global_step.
                                +--
                                +-- Logs warning if the accumulator's value is already higher than
                                +-- new_global_step.
                                +accumulatorSetGlobalStep :: forall v'2 m' . (MonadBuild m') => 
                                +                            Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
                                +                            -> Tensor v'2 Data.Int.Int64 -- ^ __new_global_step__: The new global_step value to set.
                                +                            -> m' (ControlNode)
                                +accumulatorSetGlobalStep = accumulatorSetGlobalStep' id
                                +accumulatorSetGlobalStep' :: forall v'2 m' . (MonadBuild m') => OpParams ->
                                +                             Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
                                +                             -> Tensor v'2 Data.Int.Int64 -- ^ __new_global_step__: The new global_step value to set.
                                +                             -> m' (ControlNode)
                                +accumulatorSetGlobalStep' op'options handle new_global_step | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs new_global_step]
                                +        buildOp [] (opDef "AccumulatorSetGlobalStep"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to an accumulator."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "new_global_step"
                                +  description: "The new global_step value to set."
                                +  type: DT_INT64
                                +}
                                +-}
                                +
                                +-- | Extracts the average gradient in the given ConditionalAccumulator.
                                +--
                                +-- The op blocks until sufficient (i.e., more than num_required)
                                +-- gradients have been accumulated.  If the accumulator has already
                                +-- aggregated more than num_required gradients, it returns the average of
                                +-- the accumulated gradients.  Also automatically increments the recorded
                                +-- global_step in the accumulator by 1, and resets the aggregate to 0.
                                +accumulatorTakeGradient :: forall v'2 dtype m' . (MonadBuild m',
                                +                                                  OneOf '[(Data.Complex.Complex Double),
                                +                                                          (Data.Complex.Complex Float),
                                +                                                          Data.Int.Int16,
                                +                                                          Data.Int.Int32,
                                +                                                          Data.Int.Int64,
                                +                                                          Data.Int.Int8,
                                +                                                          Data.Word.Word16,
                                +                                                          Data.Word.Word8,
                                +                                                          Double,
                                +                                                          Float] dtype) => 
                                +                           Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
                                +                           -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate.
                                +                           -> m' (Tensor Value dtype) -- ^ __average__: The average of the accumulated gradients.
                                +accumulatorTakeGradient = accumulatorTakeGradient' id
                                +accumulatorTakeGradient' :: forall v'2 dtype m' . (MonadBuild m',
                                +                                                   OneOf '[(Data.Complex.Complex Double),
                                +                                                           (Data.Complex.Complex Float),
                                +                                                           Data.Int.Int16,
                                +                                                           Data.Int.Int32,
                                +                                                           Data.Int.Int64,
                                +                                                           Data.Int.Int8,
                                +                                                           Data.Word.Word16,
                                +                                                           Data.Word.Word8,
                                +                                                           Double,
                                +                                                           Float] dtype) =>
                                +                            OpParams ->
                                +                            Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
                                +                            -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate.
                                +                            -> m' (Tensor Value dtype) -- ^ __average__: The average of the accumulated gradients.
                                +accumulatorTakeGradient' op'options handle num_required | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs num_required]
                                +        buildOp [] (opDef "AccumulatorTakeGradient"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to an accumulator."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "num_required"
                                +  description: "Number of gradients required before we return an aggregate."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "average"
                                +  description: "The average of the accumulated gradients."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes acos of x element-wise.
                                +
                                +acos :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                Data.Int.Int64, Data.Word.Word16, Double,
                                +                                Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +acos = acos' id
                                +acos' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                 Data.Int.Int64, Data.Word.Word16, Double,
                                +                                 Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +acos' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Acos"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes inverse hyperbolic cosine of x element-wise.
                                +
                                +acosh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +acosh = acosh' id
                                +acosh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float),
                                +                                  Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +acosh' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Acosh"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns x + y element-wise.
                                +--
                                +-- *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +add :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float),
                                +                                   Data.ByteString.ByteString, Data.Int.Int16,
                                +                                   Data.Int.Int32, Data.Int.Int64,
                                +                                   Data.Int.Int8, Data.Word.Word16,
                                +                                   Data.Word.Word8, Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor v'2 t -- ^ __y__
                                +       -> Tensor Build t -- ^ __z__
                                +add = add' id
                                +add' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.ByteString.ByteString, Data.Int.Int16,
                                +                                    Data.Int.Int32, Data.Int.Int64,
                                +                                    Data.Int.Int8, Data.Word.Word16,
                                +                                    Data.Word.Word8, Double, Float] t) =>
                                +        OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor v'2 t -- ^ __y__
                                +        -> Tensor Build t -- ^ __z__
                                +add' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Add"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_STRING
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
                                +--
                                +-- A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
                                +-- `sparse_values`, and `sparse_shape`, where
                                +-- 
                                +-- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
                                +-- 
                                +-- An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
                                +-- having a first `sparse_indices` column taking values between `[0, N)`, where
                                +-- the minibatch size `N == sparse_shape[0]`.
                                +-- 
                                +-- The input `SparseTensor` must have rank `R` greater than 1, and the first
                                +-- dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`
                                +-- must be sorted in increasing order of this first dimension.  The stored
                                +-- `SparseTensor` objects pointed to by each row of the output `sparse_handles`
                                +-- will have rank `R-1`.
                                +-- 
                                +-- The `SparseTensor` values can then be read out as part of a minibatch by passing
                                +-- the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure
                                +-- the correct `SparseTensorsMap` is accessed, ensure that the same
                                +-- `container` and `shared_name` are passed to that Op.  If no `shared_name`
                                +-- is provided here, instead use the *name* of the Operation created by calling
                                +-- `AddManySparseToTensorsMap` as the `shared_name` passed to
                                +-- `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
                                +addManySparseToTensorsMap :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                +                                                        TensorType t) => 
                                +                             Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
                                +                                                       -- `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
                                +                             -> Tensor v'2 t -- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
                                +                             -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
                                +                                                          -- The minibatch size `N == sparse_shape[0]`.
                                +                             -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handles__: 1-D.  The handles of the `SparseTensor` now stored in the
                                +                             -- `SparseTensorsMap`.  Shape: `[N]`.
                                +addManySparseToTensorsMap = addManySparseToTensorsMap' id
                                +addManySparseToTensorsMap' :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                +                                                         TensorType t) =>
                                +                              OpParams ->
                                +                              Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
                                +                                                        -- `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
                                +                              -> Tensor v'2 t -- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
                                +                              -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
                                +                                                           -- The minibatch size `N == sparse_shape[0]`.
                                +                              -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handles__: 1-D.  The handles of the `SparseTensor` now stored in the
                                +                              -- `SparseTensorsMap`.  Shape: `[N]`.
                                +addManySparseToTensorsMap' op'options sparse_indices sparse_values
                                +                           sparse_shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices,
                                +                                                             buildInputs sparse_values,
                                +                                                             buildInputs sparse_shape]
                                +        buildOp [] (opDef "AddManySparseToTensorsMap"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sparse_indices"
                                +  description: "2-D.  The `indices` of the minibatch `SparseTensor`.\n`sparse_indices[:, 0]` must be ordered values in `[0, N)`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sparse_values"
                                +  description: "1-D.  The `values` of the minibatch `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "sparse_shape"
                                +  description: "1-D.  The `shape` of the minibatch `SparseTensor`.\nThe minibatch size `N == sparse_shape[0]`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sparse_handles"
                                +  description: "1-D.  The handles of the `SparseTensor` now stored in the\n`SparseTensorsMap`.  Shape: `[N]`."
                                +  type: DT_INT64
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The container name for the `SparseTensorsMap` created by this op."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used."
                                +}
                                +-}
                                +
                                +-- | Add all input tensors element wise.
                                +
                                +addN :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                Data.Word.Word16, Data.Word.Word8, Double,
                                +                                Float] t) => 
                                +        [Tensor v'1 t] -- ^ __inputs__: Must all be the same size and shape.
                                +        -> Tensor Build t -- ^ __sum__
                                +addN = addN' id
                                +addN' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                 Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                 Data.Word.Word16, Data.Word.Word8, Double,
                                +                                 Float] t) => OpParams ->
                                +         [Tensor v'1 t] -- ^ __inputs__: Must all be the same size and shape.
                                +         -> Tensor Build t -- ^ __sum__
                                +addN' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
                                +        return (opDef "AddN"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length inputs) :: Int64
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "Must all be the same size and shape."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +}
                                +output_arg { name: "sum" type_attr: "T" }
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Add a `SparseTensor` to a `SparseTensorsMap` return its handle.
                                +--
                                +-- A `SparseTensor` is represented by three tensors: `sparse_indices`,
                                +-- `sparse_values`, and `sparse_shape`.
                                +-- 
                                +-- This operator takes the given `SparseTensor` and adds it to a container
                                +-- object (a `SparseTensorsMap`).  A unique key within this container is generated
                                +-- in the form of an `int64`, and this is the value that is returned.
                                +-- 
                                +-- The `SparseTensor` can then be read out as part of a minibatch by passing
                                +-- the key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure
                                +-- the correct `SparseTensorsMap` is accessed, ensure that the same
                                +-- `container` and `shared_name` are passed to that Op.  If no `shared_name`
                                +-- is provided here, instead use the *name* of the Operation created by calling
                                +-- `AddSparseToTensorsMap` as the `shared_name` passed to
                                +-- `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
                                +addSparseToTensorsMap :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                +                                                    TensorType t) => 
                                +                         Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.
                                +                         -> Tensor v'2 t -- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.
                                +                         -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.
                                +                         -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handle__: 0-D.  The handle of the `SparseTensor` now stored in the
                                +                         -- `SparseTensorsMap`.
                                +addSparseToTensorsMap = addSparseToTensorsMap' id
                                +addSparseToTensorsMap' :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                +                                                     TensorType t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.
                                +                          -> Tensor v'2 t -- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.
                                +                          -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.
                                +                          -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handle__: 0-D.  The handle of the `SparseTensor` now stored in the
                                +                          -- `SparseTensorsMap`.
                                +addSparseToTensorsMap' op'options sparse_indices sparse_values
                                +                       sparse_shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices,
                                +                                                             buildInputs sparse_values,
                                +                                                             buildInputs sparse_shape]
                                +        buildOp [] (opDef "AddSparseToTensorsMap"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sparse_indices"
                                +  description: "2-D.  The `indices` of the `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sparse_values"
                                +  description: "1-D.  The `values` of the `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "sparse_shape"
                                +  description: "1-D.  The `shape` of the `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sparse_handle"
                                +  description: "0-D.  The handle of the `SparseTensor` now stored in the\n`SparseTensorsMap`."
                                +  type: DT_INT64
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The container name for the `SparseTensorsMap` created by this op."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used."
                                +}
                                +-}
                                +
                                +-- | Deprecated. Disallowed in GraphDef version >= 2.
                                +
                                +adjustContrast :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t) => 
                                +                  Tensor v'1 t -- ^ __images__
                                +                  -> Tensor v'2 Float -- ^ __contrast_factor__
                                +                  -> Tensor v'3 Float -- ^ __min_value__
                                +                  -> Tensor v'4 Float -- ^ __max_value__
                                +                  -> Tensor Build Float -- ^ __output__
                                +adjustContrast = adjustContrast' id
                                +adjustContrast' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t) => OpParams ->
                                +                   Tensor v'1 t -- ^ __images__
                                +                   -> Tensor v'2 Float -- ^ __contrast_factor__
                                +                   -> Tensor v'3 Float -- ^ __min_value__
                                +                   -> Tensor v'4 Float -- ^ __max_value__
                                +                   -> Tensor Build Float -- ^ __output__
                                +adjustContrast' op'options images contrast_factor min_value
                                +                max_value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs contrast_factor,
                                +                                                             buildInputs min_value,
                                +                                                             buildInputs max_value]
                                +        return (opDef "AdjustContrast"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "images" type_attr: "T" }
                                +input_arg { name: "contrast_factor" type: DT_FLOAT }
                                +input_arg { name: "min_value" type: DT_FLOAT }
                                +input_arg { name: "max_value" type: DT_FLOAT }
                                +output_arg { name: "output" type: DT_FLOAT }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Adjust the contrast of one or more images.
                                +--
                                +-- `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
                                +-- interpreted as `[height, width, channels]`.  The other dimensions only
                                +-- represent a collection of images, such as `[batch, height, width, channels].`
                                +-- 
                                +-- Contrast is adjusted independently for each channel of each image.
                                +-- 
                                +-- For each channel, the Op first computes the mean of the image pixels in the
                                +-- channel and then adjusts each component of each pixel to
                                +-- `(x - mean) * contrast_factor + mean`.
                                +adjustContrastv2 :: 
                                +                    Tensor v'1 Float -- ^ __images__: Images to adjust.  At least 3-D.
                                +                    -> Tensor v'2 Float -- ^ __contrast_factor__: A float multiplier for adjusting contrast.
                                +                    -> Tensor Build Float -- ^ __output__: The contrast-adjusted image or images.
                                +adjustContrastv2 = adjustContrastv2' id
                                +adjustContrastv2' :: OpParams ->
                                +                     Tensor v'1 Float -- ^ __images__: Images to adjust.  At least 3-D.
                                +                     -> Tensor v'2 Float -- ^ __contrast_factor__: A float multiplier for adjusting contrast.
                                +                     -> Tensor Build Float -- ^ __output__: The contrast-adjusted image or images.
                                +adjustContrastv2' op'options images contrast_factor | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs contrast_factor]
                                +        return (opDef "AdjustContrastv2"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "Images to adjust.  At least 3-D."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "contrast_factor"
                                +  description: "A float multiplier for adjusting contrast."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The contrast-adjusted image or images."
                                +  type: DT_FLOAT
                                +}
                                +-}
                                +
                                +-- | Adjust the hue of one or more images.
                                +--
                                +-- `images` is a tensor of at least 3 dimensions.  The last dimension is
                                +-- interpretted as channels, and must be three.
                                +-- 
                                +-- The input image is considered in the RGB colorspace. Conceptually, the RGB
                                +-- colors are first mapped into HSV. A delta is then applied all the hue values,
                                +-- and then remapped back to RGB colorspace.
                                +adjustHue :: 
                                +             Tensor v'1 Float -- ^ __images__: Images to adjust.  At least 3-D.
                                +             -> Tensor v'2 Float -- ^ __delta__: A float delta to add to the hue.
                                +             -> Tensor Build Float -- ^ __output__: The hue-adjusted image or images.
                                +adjustHue = adjustHue' id
                                +adjustHue' :: OpParams ->
                                +              Tensor v'1 Float -- ^ __images__: Images to adjust.  At least 3-D.
                                +              -> Tensor v'2 Float -- ^ __delta__: A float delta to add to the hue.
                                +              -> Tensor Build Float -- ^ __output__: The hue-adjusted image or images.
                                +adjustHue' op'options images delta | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs delta]
                                +        return (opDef "AdjustHue"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "Images to adjust.  At least 3-D."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "delta"
                                +  description: "A float delta to add to the hue."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The hue-adjusted image or images."
                                +  type: DT_FLOAT
                                +}
                                +-}
                                +
                                +-- | Adjust the saturation of one or more images.
                                +--
                                +-- `images` is a tensor of at least 3 dimensions.  The last dimension is
                                +-- interpretted as channels, and must be three.
                                +-- 
                                +-- The input image is considered in the RGB colorspace. Conceptually, the RGB
                                +-- colors are first mapped into HSV. A scale is then applied all the saturation
                                +-- values, and then remapped back to RGB colorspace.
                                +adjustSaturation :: 
                                +                    Tensor v'1 Float -- ^ __images__: Images to adjust.  At least 3-D.
                                +                    -> Tensor v'2 Float -- ^ __scale__: A float scale to add to the saturation.
                                +                    -> Tensor Build Float -- ^ __output__: The hue-adjusted image or images.
                                +adjustSaturation = adjustSaturation' id
                                +adjustSaturation' :: OpParams ->
                                +                     Tensor v'1 Float -- ^ __images__: Images to adjust.  At least 3-D.
                                +                     -> Tensor v'2 Float -- ^ __scale__: A float scale to add to the saturation.
                                +                     -> Tensor Build Float -- ^ __output__: The hue-adjusted image or images.
                                +adjustSaturation' op'options images scale | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs scale]
                                +        return (opDef "AdjustSaturation"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "Images to adjust.  At least 3-D."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "scale"
                                +  description: "A float scale to add to the saturation."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The hue-adjusted image or images."
                                +  type: DT_FLOAT
                                +}
                                +-}
                                +
                                +-- | Computes the "logical and" of elements across dimensions of a tensor.
                                +--
                                +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
                                +-- retained with length 1.
                                +all :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => 
                                +       Tensor v'1 Bool -- ^ __input__: The tensor to reduce.
                                +       -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +       -> Tensor Build Bool -- ^ __output__: The reduced tensor.
                                +all = all' id
                                +all' :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
                                +        OpParams ->
                                +        Tensor v'1 Bool -- ^ __input__: The tensor to reduce.
                                +        -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +        -> Tensor Build Bool -- ^ __output__: The reduced tensor.
                                +all' op'options input reduction_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs reduction_indices]
                                +        return (opDef "All"
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The tensor to reduce." type: DT_BOOL
                                +}
                                +input_arg {
                                +  name: "reduction_indices"
                                +  description: "The dimensions to reduce."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output" description: "The reduced tensor." type: DT_BOOL
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Generates labels for candidate sampling with a learned unigram distribution.
                                +--
                                +-- See explanations of candidate sampling and the data formats at
                                +-- go/candidate-sampling.
                                +-- 
                                +-- For each batch, this op picks a single set of sampled candidate labels.
                                +-- 
                                +-- The advantages of sampling candidates per-batch are simplicity and the
                                +-- possibility of efficient dense matrix multiplication. The disadvantage is that
                                +-- the sampled candidates must be chosen independently of the context and of the
                                +-- true labels.
                                +allCandidateSampler :: forall v'1 m' . (MonadBuild m') => 
                                +                       Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to produce.
                                +                       -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                       -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                               -- candidates in a batch are unique. This requires some approximation to
                                +                               -- estimate the post-rejection sampling probabilities.
                                +                       -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                    -- IDs of the num_true target_classes in the corresponding original label.
                                +                       -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float,
                                +                               Tensor Value Float))
                                +                       -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                       --
                                +                       -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                       -- the ID of a sampled candidate.
                                +                       --
                                +                       -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                       -- the number of times each candidate is expected to occur in a batch
                                +                       -- of sampled candidates. If unique=true, then this is a probability.
                                +                       --
                                +                       -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                       -- candidate representing the number of times the candidate is expected
                                +                       -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                       -- probability.
                                +allCandidateSampler = allCandidateSampler' id
                                +allCandidateSampler' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                        Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to produce.
                                +                        -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                        -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                -- candidates in a batch are unique. This requires some approximation to
                                +                                -- estimate the post-rejection sampling probabilities.
                                +                        -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                     -- IDs of the num_true target_classes in the corresponding original label.
                                +                        -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float,
                                +                                Tensor Value Float))
                                +                        -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                        --
                                +                        -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                        -- the ID of a sampled candidate.
                                +                        --
                                +                        -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                        -- the number of times each candidate is expected to occur in a batch
                                +                        -- of sampled candidates. If unique=true, then this is a probability.
                                +                        --
                                +                        -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                        -- candidate representing the number of times the candidate is expected
                                +                        -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                        -- probability.
                                +allCandidateSampler' op'options num_sampled num_true unique
                                +                     true_classes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes]
                                +        buildOp [] (opDef "AllCandidateSampler"
                                +                    & opAttr "num_sampled" .~ num_sampled
                                +                    & opAttr "num_true" .~ num_true
                                +                    & opAttr "unique" .~ unique
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "true_classes"
                                +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sampled_candidates"
                                +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "true_expected_count"
                                +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "sampled_expected_count"
                                +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_true"
                                +  type: "int"
                                +  description: "Number of true labels per context."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "num_sampled"
                                +  type: "int"
                                +  description: "Number of candidates to produce."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "unique"
                                +  type: "bool"
                                +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +-}
                                +
                                +-- | Computes the "logical or" of elements across dimensions of a tensor.
                                +--
                                +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
                                +-- retained with length 1.
                                +any :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => 
                                +       Tensor v'1 Bool -- ^ __input__: The tensor to reduce.
                                +       -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +       -> Tensor Build Bool -- ^ __output__: The reduced tensor.
                                +any = any' id
                                +any' :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
                                +        OpParams ->
                                +        Tensor v'1 Bool -- ^ __input__: The tensor to reduce.
                                +        -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +        -> Tensor Build Bool -- ^ __output__: The reduced tensor.
                                +any' op'options input reduction_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs reduction_indices]
                                +        return (opDef "Any"
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The tensor to reduce." type: DT_BOOL
                                +}
                                +input_arg {
                                +  name: "reduction_indices"
                                +  description: "The dimensions to reduce."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output" description: "The reduced tensor." type: DT_BOOL
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the adadelta scheme.
                                +--
                                +-- accum = rho() * accum + (1 - rho()) * grad.square();
                                +-- update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
                                +-- update_accum = rho() * update_accum + (1 - rho()) * update.square();
                                +-- var -= update;
                                +applyAdadelta :: forall v'4 v'5 v'6 v'7 t m' . (MonadBuild m',
                                +                                                OneOf '[(Data.Complex.Complex Double),
                                +                                                        (Data.Complex.Complex Float),
                                +                                                        Data.Int.Int16,
                                +                                                        Data.Int.Int32,
                                +                                                        Data.Int.Int64,
                                +                                                        Data.Int.Int8,
                                +                                                        Data.Word.Word16,
                                +                                                        Data.Word.Word8, Double,
                                +                                                        Float] t) => 
                                +                 Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                 -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                 -> Tensor Ref t -- ^ __accum_update__: Should be from a Variable().
                                +                 -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                 -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar.
                                +                 -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
                                +                 -> Tensor v'7 t -- ^ __grad__: The gradient.
                                +                 -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyAdadelta = applyAdadelta' id
                                +applyAdadelta' :: forall v'4 v'5 v'6 v'7 t m' . (MonadBuild m',
                                +                                                 OneOf '[(Data.Complex.Complex Double),
                                +                                                         (Data.Complex.Complex Float),
                                +                                                         Data.Int.Int16,
                                +                                                         Data.Int.Int32,
                                +                                                         Data.Int.Int64,
                                +                                                         Data.Int.Int8,
                                +                                                         Data.Word.Word16,
                                +                                                         Data.Word.Word8,
                                +                                                         Double, Float] t) =>
                                +                  OpParams ->
                                +                  Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                  -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                  -> Tensor Ref t -- ^ __accum_update__: Should be from a Variable().
                                +                  -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                  -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar.
                                +                  -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
                                +                  -> Tensor v'7 t -- ^ __grad__: The gradient.
                                +                  -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyAdadelta' op'options var accum accum_update lr rho epsilon
                                +               grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs accum_update,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ApplyAdadelta"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum_update"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Constant factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the adagrad scheme.
                                +--
                                +-- accum += grad * grad
                                +-- var -= lr * grad * (1 / sqrt(accum))
                                +applyAdagrad :: forall v'3 v'4 t m' . (MonadBuild m',
                                +                                       OneOf '[(Data.Complex.Complex Double),
                                +                                               (Data.Complex.Complex Float),
                                +                                               Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Int.Int64, Data.Int.Int8,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8, Double,
                                +                                               Float] t) => 
                                +                Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyAdagrad = applyAdagrad' id
                                +applyAdagrad' :: forall v'3 v'4 t m' . (MonadBuild m',
                                +                                        OneOf '[(Data.Complex.Complex Double),
                                +                                                (Data.Complex.Complex Float),
                                +                                                Data.Int.Int16, Data.Int.Int32,
                                +                                                Data.Int.Int64, Data.Int.Int8,
                                +                                                Data.Word.Word16,
                                +                                                Data.Word.Word8, Double,
                                +                                                Float] t) => OpParams ->
                                +                 Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                 -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                 -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                 -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                 -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyAdagrad' op'options var accum lr grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ApplyAdagrad"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the proximal adagrad scheme.
                                +
                                +applyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m',
                                +                                                     OneOf '[(Data.Complex.Complex Double),
                                +                                                             (Data.Complex.Complex Float),
                                +                                                             Data.Int.Int16,
                                +                                                             Data.Int.Int32,
                                +                                                             Data.Int.Int64,
                                +                                                             Data.Int.Int8,
                                +                                                             Data.Word.Word16,
                                +                                                             Data.Word.Word8,
                                +                                                             Double,
                                +                                                             Float] t) => 
                                +                  Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                  -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable().
                                +                  -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable().
                                +                  -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                  -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                  -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                  -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                  -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
                                +                  -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyAdagradDA = applyAdagradDA' id
                                +applyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m',
                                +                                                      OneOf '[(Data.Complex.Complex Double),
                                +                                                              (Data.Complex.Complex Float),
                                +                                                              Data.Int.Int16,
                                +                                                              Data.Int.Int32,
                                +                                                              Data.Int.Int64,
                                +                                                              Data.Int.Int8,
                                +                                                              Data.Word.Word16,
                                +                                                              Data.Word.Word8,
                                +                                                              Double,
                                +                                                              Float] t) =>
                                +                   OpParams ->
                                +                   Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                   -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable().
                                +                   -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable().
                                +                   -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                   -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                   -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                   -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                   -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
                                +                   -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyAdagradDA' op'options var gradient_accumulator gradient_squared_accumulator
                                +                grad lr l1 l2 global_step | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs gradient_accumulator,
                                +                                                             buildInputs gradient_squared_accumulator,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs global_step]
                                +        buildOp [] (opDef "ApplyAdagradDA"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "gradient_accumulator"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "gradient_squared_accumulator"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "global_step"
                                +  description: "Training step number. Must be a scalar."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the Adam algorithm.
                                +--
                                +-- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
                                +-- m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
                                +-- v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
                                +-- variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
                                +applyAdam :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' . (MonadBuild m',
                                +                                                         OneOf '[(Data.Complex.Complex Double),
                                +                                                                 (Data.Complex.Complex Float),
                                +                                                                 Data.Int.Int16,
                                +                                                                 Data.Int.Int32,
                                +                                                                 Data.Int.Int64,
                                +                                                                 Data.Int.Int8,
                                +                                                                 Data.Word.Word16,
                                +                                                                 Data.Word.Word8,
                                +                                                                 Double,
                                +                                                                 Float] t) => 
                                +             Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +             -> Tensor Ref t -- ^ __m__: Should be from a Variable().
                                +             -> Tensor Ref t -- ^ __v__: Should be from a Variable().
                                +             -> Tensor v'4 t -- ^ __beta1_power__: Must be a scalar.
                                +             -> Tensor v'5 t -- ^ __beta2_power__: Must be a scalar.
                                +             -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +             -> Tensor v'7 t -- ^ __beta1__: Momentum factor. Must be a scalar.
                                +             -> Tensor v'8 t -- ^ __beta2__: Momentum factor. Must be a scalar.
                                +             -> Tensor v'9 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +             -> Tensor v'10 t -- ^ __grad__: The gradient.
                                +             -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyAdam = applyAdam' id
                                +applyAdam' :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' . (MonadBuild m',
                                +                                                          OneOf '[(Data.Complex.Complex Double),
                                +                                                                  (Data.Complex.Complex Float),
                                +                                                                  Data.Int.Int16,
                                +                                                                  Data.Int.Int32,
                                +                                                                  Data.Int.Int64,
                                +                                                                  Data.Int.Int8,
                                +                                                                  Data.Word.Word16,
                                +                                                                  Data.Word.Word8,
                                +                                                                  Double,
                                +                                                                  Float] t) =>
                                +              OpParams ->
                                +              Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +              -> Tensor Ref t -- ^ __m__: Should be from a Variable().
                                +              -> Tensor Ref t -- ^ __v__: Should be from a Variable().
                                +              -> Tensor v'4 t -- ^ __beta1_power__: Must be a scalar.
                                +              -> Tensor v'5 t -- ^ __beta2_power__: Must be a scalar.
                                +              -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +              -> Tensor v'7 t -- ^ __beta1__: Momentum factor. Must be a scalar.
                                +              -> Tensor v'8 t -- ^ __beta2__: Momentum factor. Must be a scalar.
                                +              -> Tensor v'9 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +              -> Tensor v'10 t -- ^ __grad__: The gradient.
                                +              -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyAdam' op'options var m v beta1_power beta2_power lr beta1 beta2 epsilon
                                +           grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs m,
                                +                                                             buildInputs v,
                                +                                                             buildInputs beta1_power,
                                +                                                             buildInputs beta2_power,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs beta1,
                                +                                                             buildInputs beta2,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ApplyAdam"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "m"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "v"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "beta1_power" description: "Must be a scalar." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "beta2_power" description: "Must be a scalar." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "beta1"
                                +  description: "Momentum factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "beta2"
                                +  description: "Momentum factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +attr {
                                +  name: "use_nesterov"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, uses the nesterov update."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the centered RMSProp algorithm.
                                +--
                                +-- The centered RMSProp algorithm uses an estimate of the centered second moment
                                +-- (i.e., the variance) for normalization, as opposed to regular RMSProp, which
                                +-- uses the (uncentered) second moment. This often helps with training, but is
                                +-- slightly more expensive in terms of computation and memory.
                                +-- 
                                +-- Note that in dense implementation of this algorithm, mg, ms, and mom will
                                +-- update even if the grad is zero, but in this sparse implementation, mg, ms,
                                +-- and mom will not update in iterations during which the grad is zero.
                                +-- 
                                +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
                                +-- mean_grad = decay * mean_grad + (1-decay) * gradient
                                +-- 
                                +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
                                +-- 
                                +-- mg <- rho * mg_{t-1} + (1-rho) * grad
                                +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
                                +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
                                +-- var <- var - mom
                                +applyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m',
                                +                                                           OneOf '[(Data.Complex.Complex Double),
                                +                                                                   (Data.Complex.Complex Float),
                                +                                                                   Data.Int.Int16,
                                +                                                                   Data.Int.Int32,
                                +                                                                   Data.Int.Int64,
                                +                                                                   Data.Int.Int8,
                                +                                                                   Data.Word.Word16,
                                +                                                                   Data.Word.Word8,
                                +                                                                   Double,
                                +                                                                   Float] t) => 
                                +                        Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                        -> Tensor Ref t -- ^ __mg__: Should be from a Variable().
                                +                        -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
                                +                        -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
                                +                        -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                        -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                        -> Tensor v'7 t -- ^ __momentum__
                                +                        -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                        -> Tensor v'9 t -- ^ __grad__: The gradient.
                                +                        -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyCenteredRMSProp = applyCenteredRMSProp' id
                                +applyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m',
                                +                                                            OneOf '[(Data.Complex.Complex Double),
                                +                                                                    (Data.Complex.Complex Float),
                                +                                                                    Data.Int.Int16,
                                +                                                                    Data.Int.Int32,
                                +                                                                    Data.Int.Int64,
                                +                                                                    Data.Int.Int8,
                                +                                                                    Data.Word.Word16,
                                +                                                                    Data.Word.Word8,
                                +                                                                    Double,
                                +                                                                    Float] t) =>
                                +                         OpParams ->
                                +                         Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                         -> Tensor Ref t -- ^ __mg__: Should be from a Variable().
                                +                         -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
                                +                         -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
                                +                         -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                         -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                         -> Tensor v'7 t -- ^ __momentum__
                                +                         -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                         -> Tensor v'9 t -- ^ __grad__: The gradient.
                                +                         -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon
                                +                      grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs mg,
                                +                                                             buildInputs ms,
                                +                                                             buildInputs mom,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs momentum,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ApplyCenteredRMSProp"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "mg"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "ms"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "mom"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "momentum" type_attr: "T" }
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | var -= alpha * (delta + lambda * delta * (var - shadow))
                                +--
                                +-- Update '*shadow' by changing it to the new value of 'var'
                                +applyDelayCompensatedGradientDescent :: forall v'1 v'2 v'3 v'4 v'5 t
                                +                                        m' . (MonadBuild m',
                                +                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                      (Data.Complex.Complex Float),
                                +                                                      Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t) => 
                                +                                        Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                        -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                        -> Tensor v'3 t -- ^ __delta__: The change.
                                +                                        -> Tensor v'4 t -- ^ __lambda__: The variance parameter.
                                +                                        -> Tensor v'5 ResourceHandle -- ^ __shadow__: Same as "var".
                                +                                        -> m' (ControlNode)
                                +applyDelayCompensatedGradientDescent = applyDelayCompensatedGradientDescent' id
                                +applyDelayCompensatedGradientDescent' :: forall v'1 v'2 v'3 v'4 v'5 t
                                +                                         m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t) => OpParams ->
                                +                                         Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                         -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                         -> Tensor v'3 t -- ^ __delta__: The change.
                                +                                         -> Tensor v'4 t -- ^ __lambda__: The variance parameter.
                                +                                         -> Tensor v'5 ResourceHandle -- ^ __shadow__: Same as "var".
                                +                                         -> m' (ControlNode)
                                +applyDelayCompensatedGradientDescent' op'options var alpha delta lambda
                                +                                      shadow | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs alpha,
                                +                                                             buildInputs delta,
                                +                                                             buildInputs lambda,
                                +                                                             buildInputs shadow]
                                +        buildOp [] (opDef "ApplyDelayCompensatedGradientDescent"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "alpha"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "delta" description: "The change." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lambda"
                                +  description: "The variance parameter."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "shadow" description: "Same as \"var\"." type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the Ftrl-proximal scheme.
                                +--
                                +-- accum_new = accum + grad * grad
                                +-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
                                +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
                                +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
                                +-- accum = accum_new
                                +applyFtrl :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m',
                                +                                                OneOf '[(Data.Complex.Complex Double),
                                +                                                        (Data.Complex.Complex Float),
                                +                                                        Data.Int.Int16,
                                +                                                        Data.Int.Int32,
                                +                                                        Data.Int.Int64,
                                +                                                        Data.Int.Int8,
                                +                                                        Data.Word.Word16,
                                +                                                        Data.Word.Word8, Double,
                                +                                                        Float] t) => 
                                +             Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +             -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +             -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
                                +             -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +             -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +             -> Tensor v'6 t -- ^ __l1__: L1 regulariation. Must be a scalar.
                                +             -> Tensor v'7 t -- ^ __l2__: L2 regulariation. Must be a scalar.
                                +             -> Tensor v'8 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +             -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyFtrl = applyFtrl' id
                                +applyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m',
                                +                                                 OneOf '[(Data.Complex.Complex Double),
                                +                                                         (Data.Complex.Complex Float),
                                +                                                         Data.Int.Int16,
                                +                                                         Data.Int.Int32,
                                +                                                         Data.Int.Int64,
                                +                                                         Data.Int.Int8,
                                +                                                         Data.Word.Word16,
                                +                                                         Data.Word.Word8,
                                +                                                         Double, Float] t) =>
                                +              OpParams ->
                                +              Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +              -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +              -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
                                +              -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +              -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +              -> Tensor v'6 t -- ^ __l1__: L1 regulariation. Must be a scalar.
                                +              -> Tensor v'7 t -- ^ __l2__: L2 regulariation. Must be a scalar.
                                +              -> Tensor v'8 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +              -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyFtrl' op'options var accum linear grad lr l1 l2
                                +           lr_power | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs linear,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs lr_power]
                                +        buildOp [] (opDef "ApplyFtrl"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "linear"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr_power"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the Ftrl-proximal scheme.
                                +--
                                +-- grad_with_shrinkage = grad + 2 * l2_shrinkage * var
                                +-- accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
                                +-- linear += grad_with_shrinkage +
                                +--     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
                                +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
                                +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
                                +-- accum = accum_new
                                +applyFtrlV2 :: forall v'4 v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m',
                                +                                                      OneOf '[(Data.Complex.Complex Double),
                                +                                                              (Data.Complex.Complex Float),
                                +                                                              Data.Int.Int16,
                                +                                                              Data.Int.Int32,
                                +                                                              Data.Int.Int64,
                                +                                                              Data.Int.Int8,
                                +                                                              Data.Word.Word16,
                                +                                                              Data.Word.Word8,
                                +                                                              Double,
                                +                                                              Float] t) => 
                                +               Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +               -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +               -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
                                +               -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +               -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +               -> Tensor v'6 t -- ^ __l1__: L1 regulariation. Must be a scalar.
                                +               -> Tensor v'7 t -- ^ __l2__: L2 shrinkage regulariation. Must be a scalar.
                                +               -> Tensor v'8 t -- ^ __l2_shrinkage__
                                +               -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +               -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyFtrlV2 = applyFtrlV2' id
                                +applyFtrlV2' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m',
                                +                                                       OneOf '[(Data.Complex.Complex Double),
                                +                                                               (Data.Complex.Complex Float),
                                +                                                               Data.Int.Int16,
                                +                                                               Data.Int.Int32,
                                +                                                               Data.Int.Int64,
                                +                                                               Data.Int.Int8,
                                +                                                               Data.Word.Word16,
                                +                                                               Data.Word.Word8,
                                +                                                               Double,
                                +                                                               Float] t) =>
                                +                OpParams ->
                                +                Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
                                +                -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                -> Tensor v'6 t -- ^ __l1__: L1 regulariation. Must be a scalar.
                                +                -> Tensor v'7 t -- ^ __l2__: L2 shrinkage regulariation. Must be a scalar.
                                +                -> Tensor v'8 t -- ^ __l2_shrinkage__
                                +                -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyFtrlV2' op'options var accum linear grad lr l1 l2 l2_shrinkage
                                +             lr_power | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs linear,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs l2_shrinkage,
                                +                                                             buildInputs lr_power]
                                +        buildOp [] (opDef "ApplyFtrlV2"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "linear"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 shrinkage regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "l2_shrinkage" type_attr: "T" }
                                +input_arg {
                                +  name: "lr_power"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' by subtracting 'alpha' * 'delta' from it.
                                +
                                +applyGradientDescent :: forall v'2 v'3 t m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t) => 
                                +                        Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                        -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                        -> Tensor v'3 t -- ^ __delta__: The change.
                                +                        -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyGradientDescent = applyGradientDescent' id
                                +applyGradientDescent' :: forall v'2 v'3 t m' . (MonadBuild m',
                                +                                                OneOf '[(Data.Complex.Complex Double),
                                +                                                        (Data.Complex.Complex Float),
                                +                                                        Data.Int.Int16,
                                +                                                        Data.Int.Int32,
                                +                                                        Data.Int.Int64,
                                +                                                        Data.Int.Int8,
                                +                                                        Data.Word.Word16,
                                +                                                        Data.Word.Word8, Double,
                                +                                                        Float] t) => OpParams ->
                                +                         Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                         -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                         -> Tensor v'3 t -- ^ __delta__: The change.
                                +                         -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyGradientDescent' op'options var alpha delta | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs alpha,
                                +                                                             buildInputs delta]
                                +        buildOp [] (opDef "ApplyGradientDescent"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "alpha"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "delta" description: "The change." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the momentum scheme. Set use_nesterov = True if you
                                +--
                                +-- want to use Nesterov momentum.
                                +-- 
                                +-- accum = accum * momentum + grad
                                +-- var -= lr * accum
                                +applyMomentum :: forall v'3 v'4 v'5 t m' . (MonadBuild m',
                                +                                            OneOf '[(Data.Complex.Complex Double),
                                +                                                    (Data.Complex.Complex Float),
                                +                                                    Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Int.Int64,
                                +                                                    Data.Int.Int8,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8, Double,
                                +                                                    Float] t) => 
                                +                 Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                 -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                 -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                 -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                 -> Tensor v'5 t -- ^ __momentum__: Momentum. Must be a scalar.
                                +                 -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyMomentum = applyMomentum' id
                                +applyMomentum' :: forall v'3 v'4 v'5 t m' . (MonadBuild m',
                                +                                             OneOf '[(Data.Complex.Complex Double),
                                +                                                     (Data.Complex.Complex Float),
                                +                                                     Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t) => OpParams ->
                                +                  Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                  -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                  -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                  -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                  -> Tensor v'5 t -- ^ __momentum__: Momentum. Must be a scalar.
                                +                  -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyMomentum' op'options var accum lr grad momentum | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs momentum]
                                +        buildOp [] (opDef "ApplyMomentum"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "momentum"
                                +  description: "Momentum. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +attr {
                                +  name: "use_nesterov"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
                                +}
                                +-}
                                +
                                +-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
                                +--
                                +-- accum += grad * grad
                                +-- prox_v = var - lr * grad * (1 / sqrt(accum))
                                +-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
                                +applyProximalAdagrad :: forall v'3 v'4 v'5 v'6 t m' . (MonadBuild m',
                                +                                                       OneOf '[(Data.Complex.Complex Double),
                                +                                                               (Data.Complex.Complex Float),
                                +                                                               Data.Int.Int16,
                                +                                                               Data.Int.Int32,
                                +                                                               Data.Int.Int64,
                                +                                                               Data.Int.Int8,
                                +                                                               Data.Word.Word16,
                                +                                                               Data.Word.Word8,
                                +                                                               Double,
                                +                                                               Float] t) => 
                                +                        Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                        -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                        -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                        -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                        -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                        -> Tensor v'6 t -- ^ __grad__: The gradient.
                                +                        -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyProximalAdagrad = applyProximalAdagrad' id
                                +applyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 t m' . (MonadBuild m',
                                +                                                        OneOf '[(Data.Complex.Complex Double),
                                +                                                                (Data.Complex.Complex Float),
                                +                                                                Data.Int.Int16,
                                +                                                                Data.Int.Int32,
                                +                                                                Data.Int.Int64,
                                +                                                                Data.Int.Int8,
                                +                                                                Data.Word.Word16,
                                +                                                                Data.Word.Word8,
                                +                                                                Double,
                                +                                                                Float] t) =>
                                +                         OpParams ->
                                +                         Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                         -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                         -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                         -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                         -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                         -> Tensor v'6 t -- ^ __grad__: The gradient.
                                +                         -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyProximalAdagrad' op'options var accum lr l1 l2 grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ApplyProximalAdagrad"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' as FOBOS algorithm with fixed learning rate.
                                +--
                                +-- prox_v = var - alpha * delta
                                +-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
                                +applyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 t m' . (MonadBuild m',
                                +                                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                                       (Data.Complex.Complex Float),
                                +                                                                       Data.Int.Int16,
                                +                                                                       Data.Int.Int32,
                                +                                                                       Data.Int.Int64,
                                +                                                                       Data.Int.Int8,
                                +                                                                       Data.Word.Word16,
                                +                                                                       Data.Word.Word8,
                                +                                                                       Double,
                                +                                                                       Float] t) =>
                                +                                
                                +                                Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                                -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                -> Tensor v'5 t -- ^ __delta__: The change.
                                +                                -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyProximalGradientDescent = applyProximalGradientDescent' id
                                +applyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 t m' . (MonadBuild m',
                                +                                                                OneOf '[(Data.Complex.Complex Double),
                                +                                                                        (Data.Complex.Complex Float),
                                +                                                                        Data.Int.Int16,
                                +                                                                        Data.Int.Int32,
                                +                                                                        Data.Int.Int64,
                                +                                                                        Data.Int.Int8,
                                +                                                                        Data.Word.Word16,
                                +                                                                        Data.Word.Word8,
                                +                                                                        Double,
                                +                                                                        Float] t) =>
                                +                                 OpParams ->
                                +                                 Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                                 -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                 -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                 -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                 -> Tensor v'5 t -- ^ __delta__: The change.
                                +                                 -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyProximalGradientDescent' op'options var alpha l1 l2
                                +                              delta | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs alpha,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs delta]
                                +        buildOp [] (opDef "ApplyProximalGradientDescent"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "alpha"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "delta" description: "The change." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the RMSProp algorithm.
                                +--
                                +-- Note that in dense implementation of this algorithm, ms and mom will
                                +-- update even if the grad is zero, but in this sparse implementation, ms
                                +-- and mom will not update in iterations during which the grad is zero.
                                +-- 
                                +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
                                +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
                                +-- 
                                +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
                                +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
                                +-- var <- var - mom
                                +applyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m',
                                +                                                   OneOf '[(Data.Complex.Complex Double),
                                +                                                           (Data.Complex.Complex Float),
                                +                                                           Data.Int.Int16,
                                +                                                           Data.Int.Int32,
                                +                                                           Data.Int.Int64,
                                +                                                           Data.Int.Int8,
                                +                                                           Data.Word.Word16,
                                +                                                           Data.Word.Word8,
                                +                                                           Double, Float] t) => 
                                +                Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
                                +                -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
                                +                -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                -> Tensor v'6 t -- ^ __momentum__
                                +                -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                -> Tensor v'8 t -- ^ __grad__: The gradient.
                                +                -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyRMSProp = applyRMSProp' id
                                +applyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m',
                                +                                                    OneOf '[(Data.Complex.Complex Double),
                                +                                                            (Data.Complex.Complex Float),
                                +                                                            Data.Int.Int16,
                                +                                                            Data.Int.Int32,
                                +                                                            Data.Int.Int64,
                                +                                                            Data.Int.Int8,
                                +                                                            Data.Word.Word16,
                                +                                                            Data.Word.Word8,
                                +                                                            Double, Float] t) =>
                                +                 OpParams ->
                                +                 Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                 -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
                                +                 -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
                                +                 -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                 -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                 -> Tensor v'6 t -- ^ __momentum__
                                +                 -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                 -> Tensor v'8 t -- ^ __grad__: The gradient.
                                +                 -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +applyRMSProp' op'options var ms mom lr rho momentum epsilon
                                +              grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs ms,
                                +                                                             buildInputs mom,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs momentum,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ApplyRMSProp"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "ms"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "mom"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "momentum" type_attr: "T" }
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Returns the truth value of abs(x-y) < tolerance element-wise.
                                +
                                +approximateEqual :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                (Data.Complex.Complex Float),
                                +                                                Data.Int.Int16, Data.Int.Int32,
                                +                                                Data.Int.Int64, Data.Int.Int8,
                                +                                                Data.Word.Word16,
                                +                                                Data.Word.Word8, Double,
                                +                                                Float] t) => 
                                +                    Tensor v'1 t -- ^ __x__
                                +                    -> Tensor v'2 t -- ^ __y__
                                +                    -> Tensor Build Bool -- ^ __z__
                                +approximateEqual = approximateEqual' id
                                +approximateEqual' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                 (Data.Complex.Complex Float),
                                +                                                 Data.Int.Int16, Data.Int.Int32,
                                +                                                 Data.Int.Int64, Data.Int.Int8,
                                +                                                 Data.Word.Word16,
                                +                                                 Data.Word.Word8, Double,
                                +                                                 Float] t) => OpParams ->
                                +                     Tensor v'1 t -- ^ __x__
                                +                     -> Tensor v'2 t -- ^ __y__
                                +                     -> Tensor Build Bool -- ^ __z__
                                +approximateEqual' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "ApproximateEqual"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "tolerance" type: "float" default_value { f: 1.0e-5 }
                                +}
                                +-}
                                +
                                +-- | Returns the index with the largest value across dimensions of a tensor.
                                +--
                                +-- Note that in case of ties the identity of the return value is not guaranteed.
                                +argMax :: forall v'1 v'2 t tidx
                                +          output_type . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                 Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                 Data.Word.Word16, Data.Word.Word8, Double,
                                +                                 Float] t, OneOf '[Data.Int.Int32,
                                +                                                   Data.Int.Int64] tidx,
                                +                         OneOf '[Data.Int.Int32, Data.Int.Int64] output_type) =>
                                +          
                                +          Tensor v'1 t -- ^ __input__
                                +          -> Tensor v'2 tidx -- ^ __dimension__: int32 or int64, 0 <= dimension < rank(input).  Describes
                                +                             -- which dimension of the input Tensor to reduce across. For vectors,
                                +                             -- use dimension = 0.
                                +          -> Tensor Build output_type -- ^ __output__
                                +argMax = argMax' id
                                +argMax' :: forall v'1 v'2 t tidx
                                +           output_type . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                  Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                  Data.Word.Word16, Data.Word.Word8, Double,
                                +                                  Float] t, OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tidx,
                                +                          OneOf '[Data.Int.Int32,
                                +                                  Data.Int.Int64] output_type) => OpParams ->
                                +           Tensor v'1 t -- ^ __input__
                                +           -> Tensor v'2 tidx -- ^ __dimension__: int32 or int64, 0 <= dimension < rank(input).  Describes
                                +                              -- which dimension of the input Tensor to reduce across. For vectors,
                                +                              -- use dimension = 0.
                                +           -> Tensor Build output_type -- ^ __output__
                                +argMax' op'options input dimension | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs dimension]
                                +        return (opDef "ArgMax"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & opAttr "output_type" .~ tensorType (undefined :: output_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg {
                                +  name: "dimension"
                                +  description: "int32 or int64, 0 <= dimension < rank(input).  Describes\nwhich dimension of the input Tensor to reduce across. For vectors,\nuse dimension = 0."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg { name: "output" type_attr: "output_type" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "output_type"
                                +  type: "type"
                                +  default_value { type: DT_INT64 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Returns the index with the smallest value across dimensions of a tensor.
                                +--
                                +-- Note that in case of ties the identity of the return value is not guaranteed.
                                +argMin :: forall v'1 v'2 t tidx
                                +          output_type . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                 Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                 Data.Word.Word16, Data.Word.Word8, Double,
                                +                                 Float] t, OneOf '[Data.Int.Int32,
                                +                                                   Data.Int.Int64] tidx,
                                +                         OneOf '[Data.Int.Int32, Data.Int.Int64] output_type) =>
                                +          
                                +          Tensor v'1 t -- ^ __input__
                                +          -> Tensor v'2 tidx -- ^ __dimension__: int32 or int64, 0 <= dimension < rank(input).  Describes
                                +                             -- which dimension of the input Tensor to reduce across. For vectors,
                                +                             -- use dimension = 0.
                                +          -> Tensor Build output_type -- ^ __output__
                                +argMin = argMin' id
                                +argMin' :: forall v'1 v'2 t tidx
                                +           output_type . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                  Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                  Data.Word.Word16, Data.Word.Word8, Double,
                                +                                  Float] t, OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tidx,
                                +                          OneOf '[Data.Int.Int32,
                                +                                  Data.Int.Int64] output_type) => OpParams ->
                                +           Tensor v'1 t -- ^ __input__
                                +           -> Tensor v'2 tidx -- ^ __dimension__: int32 or int64, 0 <= dimension < rank(input).  Describes
                                +                              -- which dimension of the input Tensor to reduce across. For vectors,
                                +                              -- use dimension = 0.
                                +           -> Tensor Build output_type -- ^ __output__
                                +argMin' op'options input dimension | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs dimension]
                                +        return (opDef "ArgMin"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & opAttr "output_type" .~ tensorType (undefined :: output_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg {
                                +  name: "dimension"
                                +  description: "int32 or int64, 0 <= dimension < rank(input).  Describes\nwhich dimension of the input Tensor to reduce across. For vectors,\nuse dimension = 0."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg { name: "output" type_attr: "output_type" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "output_type"
                                +  type: "type"
                                +  default_value { type: DT_INT64 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Converts each entry in the given tensor to strings.  Supports many numeric
                                +--
                                +-- types and boolean.
                                +asString :: forall v'1 t . (OneOf '[(Data.Complex.Complex Float), Bool,
                                +                                    Data.Int.Int32, Data.Int.Int64,
                                +                                    Data.Int.Int8, Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __input__
                                +            -> Tensor Build Data.ByteString.ByteString -- ^ __output__
                                +asString = asString' id
                                +asString' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Float), Bool,
                                +                                     Data.Int.Int32, Data.Int.Int64,
                                +                                     Data.Int.Int8, Double, Float] t) =>
                                +             OpParams ->
                                +             Tensor v'1 t -- ^ __input__
                                +             -> Tensor Build Data.ByteString.ByteString -- ^ __output__
                                +asString' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "AsString"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type: DT_STRING }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_BOOL
                                +      type: DT_INT8
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "precision"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The post-decimal precision to use for floating point numbers.\nOnly used if precision > -1."
                                +}
                                +attr {
                                +  name: "scientific"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Use scientific notation for floating point numbers."
                                +}
                                +attr {
                                +  name: "shortest"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Use shortest representation (either scientific or standard) for\nfloating point numbers."
                                +}
                                +attr {
                                +  name: "width"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Pad pre-decimal numbers to this width.\nApplies to both floating point and integer numbers.\nOnly used if width > -1."
                                +}
                                +attr {
                                +  name: "fill"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The value to pad if width > -1.  If empty, pads with spaces.\nAnother typical value is \'0\'.  String cannot be longer than 1 character."
                                +}
                                +-}
                                +
                                +-- | Computes asin of x element-wise.
                                +
                                +asin :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                Data.Int.Int64, Data.Word.Word16, Double,
                                +                                Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +asin = asin' id
                                +asin' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                 Data.Int.Int64, Data.Word.Word16, Double,
                                +                                 Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +asin' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Asin"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes inverse hyperbolic sine of x element-wise.
                                +
                                +asinh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +asinh = asinh' id
                                +asinh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float),
                                +                                  Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +asinh' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Asinh"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Asserts that the given condition is true.
                                +--
                                +-- If `condition` evaluates to false, print the list of tensors in `data`.
                                +-- `summarize` determines how many entries of the tensors to print.
                                +assert :: forall v'1 v'2 t m' . (MonadBuild m', TensorTypes t) => 
                                +          Tensor v'1 Bool -- ^ __condition__: The condition to evaluate.
                                +          -> TensorList (v'2) t -- ^ __data__: The tensors to print out when condition is false.
                                +          -> m' (ControlNode)
                                +assert = assert' id
                                +assert' :: forall v'1 v'2 t m' . (MonadBuild m', TensorTypes t) => OpParams ->
                                +           Tensor v'1 Bool -- ^ __condition__: The condition to evaluate.
                                +           -> TensorList (v'2) t -- ^ __data__: The tensors to print out when condition is false.
                                +           -> m' (ControlNode)
                                +assert' op'options condition data' | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs condition,
                                +                                                             buildInputs data']
                                +        buildOp [] (opDef "Assert"
                                +                    & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "condition"
                                +  description: "The condition to evaluate."
                                +  type: DT_BOOL
                                +}
                                +input_arg {
                                +  name: "data"
                                +  description: "The tensors to print out when condition is false."
                                +  type_list_attr: "T"
                                +}
                                +attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 }
                                +attr {
                                +  name: "summarize"
                                +  type: "int"
                                +  default_value { i: 3 }
                                +  description: "Print this many entries of each tensor."
                                +}
                                +-}
                                +
                                +-- | Update 'ref' by assigning 'value' to it.
                                +--
                                +-- This operation outputs "ref" after the assignment is done.
                                +-- This makes it easier to chain operations that need to use the reset value.
                                +assign :: forall v'2 t m' . (MonadBuild m', TensorType t) => 
                                +          Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. May be uninitialized.
                                +          -> Tensor v'2 t -- ^ __value__: The value to be assigned to the variable.
                                +          -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
                                +          -- to use the new value after the variable has been reset.
                                +assign = assign' id
                                +assign' :: forall v'2 t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +           Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. May be uninitialized.
                                +           -> Tensor v'2 t -- ^ __value__: The value to be assigned to the variable.
                                +           -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
                                +           -- to use the new value after the variable has been reset.
                                +assign' op'options ref value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs value]
                                +        buildOp [] (opDef "Assign"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node. May be uninitialized."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "The value to be assigned to the variable."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been reset."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "validate_shape"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If true, the operation will validate that the shape\nof \'value\' matches the shape of the Tensor being assigned to.  If false,\n\'ref\' will take on the shape of \'value\'."
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update 'ref' by adding 'value' to it.
                                +--
                                +-- This operation outputs "ref" after the update is done.
                                +-- This makes it easier to chain operations that need to use the reset value.
                                +assignAdd :: forall v'2 t m' . (MonadBuild m',
                                +                                OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => 
                                +             Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +             -> Tensor v'2 t -- ^ __value__: The value to be added to the variable.
                                +             -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
                                +             -- to use the new value after the variable has been updated.
                                +assignAdd = assignAdd' id
                                +assignAdd' :: forall v'2 t m' . (MonadBuild m',
                                +                                 OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => OpParams ->
                                +              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +              -> Tensor v'2 t -- ^ __value__: The value to be added to the variable.
                                +              -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
                                +              -- to use the new value after the variable has been updated.
                                +assignAdd' op'options ref value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs value]
                                +        buildOp [] (opDef "AssignAdd"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "The value to be added to the variable."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Adds a value to the current value of a variable.
                                +--
                                +-- Any ReadVariableOp which depends directly or indirectly on this assign is
                                +-- guaranteed to see the incremented value or a subsequent newer one.
                                +-- 
                                +-- Outputs the incremented value, which can be used to totally order the
                                +-- increments to this variable.
                                +assignAddVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m',
                                +                                                  TensorType dtype) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                       -> Tensor v'2 dtype -- ^ __value__: the value by which the variable will be incremented.
                                +                       -> m' (ControlNode)
                                +assignAddVariableOp = assignAddVariableOp' id
                                +assignAddVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m',
                                +                                                   TensorType dtype) =>
                                +                        OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                        -> Tensor v'2 dtype -- ^ __value__: the value by which the variable will be incremented.
                                +                        -> m' (ControlNode)
                                +assignAddVariableOp' op'options resource value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource,
                                +                                                             buildInputs value]
                                +        buildOp [] (opDef "AssignAddVariableOp"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource"
                                +  description: "handle to the resource in which to store the variable."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "the value by which the variable will be incremented."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype" type: "type" description: "the dtype of the value."
                                +}
                                +-}
                                +
                                +-- | Update 'ref' by subtracting 'value' from it.
                                +--
                                +-- This operation outputs "ref" after the update is done.
                                +-- This makes it easier to chain operations that need to use the reset value.
                                +assignSub :: forall v'2 t m' . (MonadBuild m',
                                +                                OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => 
                                +             Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +             -> Tensor v'2 t -- ^ __value__: The value to be subtracted to the variable.
                                +             -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
                                +             -- to use the new value after the variable has been updated.
                                +assignSub = assignSub' id
                                +assignSub' :: forall v'2 t m' . (MonadBuild m',
                                +                                 OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => OpParams ->
                                +              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +              -> Tensor v'2 t -- ^ __value__: The value to be subtracted to the variable.
                                +              -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
                                +              -- to use the new value after the variable has been updated.
                                +assignSub' op'options ref value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs value]
                                +        buildOp [] (opDef "AssignSub"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "The value to be subtracted to the variable."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Subtracts a value from the current value of a variable.
                                +--
                                +-- Any ReadVariableOp which depends directly or indirectly on this assign is
                                +-- guaranteed to see the incremented value or a subsequent newer one.
                                +-- 
                                +-- Outputs the incremented value, which can be used to totally order the
                                +-- increments to this variable.
                                +assignSubVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m',
                                +                                                  TensorType dtype) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                       -> Tensor v'2 dtype -- ^ __value__: the value by which the variable will be incremented.
                                +                       -> m' (ControlNode)
                                +assignSubVariableOp = assignSubVariableOp' id
                                +assignSubVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m',
                                +                                                   TensorType dtype) =>
                                +                        OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                        -> Tensor v'2 dtype -- ^ __value__: the value by which the variable will be incremented.
                                +                        -> m' (ControlNode)
                                +assignSubVariableOp' op'options resource value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource,
                                +                                                             buildInputs value]
                                +        buildOp [] (opDef "AssignSubVariableOp"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource"
                                +  description: "handle to the resource in which to store the variable."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "the value by which the variable will be incremented."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype" type: "type" description: "the dtype of the value."
                                +}
                                +-}
                                +
                                +-- | Assigns a new value to a variable.
                                +--
                                +-- Any ReadVariableOp with a control dependency on this op is guaranteed to return
                                +-- this value or a subsequent newer value of the variable.
                                +assignVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m',
                                +                                               TensorType dtype) => 
                                +                    Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                    -> Tensor v'2 dtype -- ^ __value__: the value to set the new tensor to use.
                                +                    -> m' (ControlNode)
                                +assignVariableOp = assignVariableOp' id
                                +assignVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m',
                                +                                                TensorType dtype) => OpParams ->
                                +                     Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                     -> Tensor v'2 dtype -- ^ __value__: the value to set the new tensor to use.
                                +                     -> m' (ControlNode)
                                +assignVariableOp' op'options resource value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource,
                                +                                                             buildInputs value]
                                +        buildOp [] (opDef "AssignVariableOp"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource"
                                +  description: "handle to the resource in which to store the variable."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "the value to set the new tensor to use."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype" type: "type" description: "the dtype of the value."
                                +}
                                +-}
                                +
                                +-- | Computes atan of x element-wise.
                                +
                                +atan :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                Data.Int.Int64, Data.Word.Word16, Double,
                                +                                Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +atan = atan' id
                                +atan' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                 Data.Int.Int64, Data.Word.Word16, Double,
                                +                                 Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +atan' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Atan"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
                                +--
                                +-- This is the angle \( \theta \in [-\pi, \pi] \) such that
                                +-- \[ x = r \cos(\theta) \]
                                +-- and
                                +-- \[ y = r \sin(\theta) \]
                                +-- where \(r = \sqrt(x^2 + y^2) \).
                                +atan2 :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __y__
                                +         -> Tensor v'2 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __z__
                                +atan2 = atan2' id
                                +atan2' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams ->
                                +          Tensor v'1 t -- ^ __y__
                                +          -> Tensor v'2 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __z__
                                +atan2' op'options y x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y,
                                +                                                             buildInputs x]
                                +        return (opDef "Atan2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "y" type_attr: "T" }
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Computes inverse hyperbolic tangent of x element-wise.
                                +
                                +atanh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +atanh = atanh' id
                                +atanh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float),
                                +                                  Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +atanh' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Atanh"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Produces a visualization of audio data over time.
                                +--
                                +-- Spectrograms are a standard way of representing audio information as a series of
                                +-- slices of frequency information, one slice for each window of time. By joining
                                +-- these together into a sequence, they form a distinctive fingerprint of the sound
                                +-- over time.
                                +-- 
                                +-- This op expects to receive audio data as an input, stored as floats in the range
                                +-- -1 to 1, together with a window width in samples, and a stride specifying how
                                +-- far to move the window between slices. From this it generates a three
                                +-- dimensional output. The lowest dimension has an amplitude value for each
                                +-- frequency during that time slice. The next dimension is time, with successive
                                +-- frequency slices. The final dimension is for the channels in the input, so a
                                +-- stereo audio input would have two here for example.
                                +-- 
                                +-- This means the layout when converted and saved as an image is rotated 90 degrees
                                +-- clockwise from a typical spectrogram. Time is descending down the Y axis, and
                                +-- the frequency decreases from left to right.
                                +-- 
                                +-- Each value in the result represents the square root of the sum of the real and
                                +-- imaginary parts of an FFT on the current window of samples. In this way, the
                                +-- lowest dimension represents the power of each frequency in the current window,
                                +-- and adjacent windows are concatenated in the next dimension.
                                +-- 
                                +-- To get a more intuitive and visual look at what this operation does, you can run
                                +-- tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
                                +-- resulting spectrogram as a PNG image.
                                +audioSpectrogram :: 
                                +                    Data.Int.Int64 -- ^ __stride__: How widely apart the center of adjacent sample windows should be.
                                +                    -> Data.Int.Int64 -- ^ __window_size__: How wide the input window is in samples. For the highest efficiency
                                +                                      -- this should be a power of two, but other values are accepted.
                                +                    -> Tensor v'1 Float -- ^ __input__: Float representation of audio data.
                                +                    -> Tensor Build Float -- ^ __spectrogram__: 3D representation of the audio frequencies as an image.
                                +audioSpectrogram = audioSpectrogram' id
                                +audioSpectrogram' :: OpParams ->
                                +                     Data.Int.Int64 -- ^ __stride__: How widely apart the center of adjacent sample windows should be.
                                +                     -> Data.Int.Int64 -- ^ __window_size__: How wide the input window is in samples. For the highest efficiency
                                +                                       -- this should be a power of two, but other values are accepted.
                                +                     -> Tensor v'1 Float -- ^ __input__: Float representation of audio data.
                                +                     -> Tensor Build Float -- ^ __spectrogram__: 3D representation of the audio frequencies as an image.
                                +audioSpectrogram' op'options stride window_size input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "AudioSpectrogram"
                                +                & opAttr "stride" .~ stride
                                +                & opAttr "window_size" .~ window_size
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Float representation of audio data."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "spectrogram"
                                +  description: "3D representation of the audio frequencies as an image."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "window_size"
                                +  type: "int"
                                +  description: "How wide the input window is in samples. For the highest efficiency\nthis should be a power of two, but other values are accepted."
                                +}
                                +attr {
                                +  name: "stride"
                                +  type: "int"
                                +  description: "How widely apart the center of adjacent sample windows should be."
                                +}
                                +attr {
                                +  name: "magnitude_squared"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Whether to return the squared magnitude or just the\nmagnitude. Using squared magnitude can avoid extra calculations."
                                +}
                                +-}
                                +
                                +-- | Outputs a `Summary` protocol buffer with audio.
                                +--
                                +-- The summary has up to `max_outputs` summary values containing audio. The
                                +-- audio is built from `tensor` which must be 3-D with shape `[batch_size,
                                +-- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
                                +-- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
                                +-- 
                                +-- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
                                +-- build the `tag` of the summary values:
                                +-- 
                                +-- *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
                                +-- *  If `max_outputs` is greater than 1, the summary value tags are
                                +--    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
                                +audioSummary :: 
                                +                Float -- ^ __sample_rate__: The sample rate of the signal in hertz.
                                +                -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
                                +                -> Tensor v'2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`.
                                +                -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +audioSummary = audioSummary' id
                                +audioSummary' :: OpParams ->
                                +                 Float -- ^ __sample_rate__: The sample rate of the signal in hertz.
                                +                 -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
                                +                 -> Tensor v'2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`.
                                +                 -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +audioSummary' op'options sample_rate tag tensor | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag,
                                +                                                             buildInputs tensor]
                                +        return (opDef "AudioSummary"
                                +                & opAttr "sample_rate" .~ sample_rate
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tag"
                                +  description: "Scalar. Used to build the `tag` attribute of the summary values."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor"
                                +  description: "2-D of shape `[batch_size, frames]`."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "summary"
                                +  description: "Scalar. Serialized `Summary` protocol buffer."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "sample_rate"
                                +  type: "float"
                                +  description: "The sample rate of the signal in hertz."
                                +}
                                +attr {
                                +  name: "max_outputs"
                                +  type: "int"
                                +  default_value { i: 3 }
                                +  description: "Max number of batch elements to generate audio for."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Outputs a `Summary` protocol buffer with audio.
                                +--
                                +-- The summary has up to `max_outputs` summary values containing audio. The
                                +-- audio is built from `tensor` which must be 3-D with shape `[batch_size,
                                +-- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
                                +-- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
                                +-- 
                                +-- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
                                +-- build the `tag` of the summary values:
                                +-- 
                                +-- *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
                                +-- *  If `max_outputs` is greater than 1, the summary value tags are
                                +--    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
                                +audioSummaryV2 :: 
                                +                  Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
                                +                  -> Tensor v'2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`.
                                +                  -> Tensor v'3 Float -- ^ __sample_rate__: The sample rate of the signal in hertz.
                                +                  -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +audioSummaryV2 = audioSummaryV2' id
                                +audioSummaryV2' :: OpParams ->
                                +                   Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
                                +                   -> Tensor v'2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`.
                                +                   -> Tensor v'3 Float -- ^ __sample_rate__: The sample rate of the signal in hertz.
                                +                   -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +audioSummaryV2' op'options tag tensor sample_rate | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag,
                                +                                                             buildInputs tensor,
                                +                                                             buildInputs sample_rate]
                                +        return (opDef "AudioSummaryV2"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tag"
                                +  description: "Scalar. Used to build the `tag` attribute of the summary values."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor"
                                +  description: "2-D of shape `[batch_size, frames]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "sample_rate"
                                +  description: "The sample rate of the signal in hertz."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "summary"
                                +  description: "Scalar. Serialized `Summary` protocol buffer."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "max_outputs"
                                +  type: "int"
                                +  default_value { i: 3 }
                                +  description: "Max number of batch elements to generate audio for."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Performs average pooling on the input.
                                +--
                                +-- Each entry in `output` is the mean of the corresponding size `ksize`
                                +-- window in `value`.
                                +avgPool :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
                                +           -> Tensor Build t -- ^ __output__: The average pooled output tensor.
                                +avgPool = avgPool' id
                                +avgPool' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
                                +            -> Tensor Build t -- ^ __output__: The average pooled output tensor.
                                +avgPool' op'options value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value]
                                +        return (opDef "AvgPool"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The average pooled output tensor."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the sliding window for each dimension of `value`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of `value`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Performs 3D average pooling on the input.
                                +
                                +avgPool3D :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +             Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
                                +             -> Tensor Build t -- ^ __output__: The average pooled output tensor.
                                +avgPool3D = avgPool3D' id
                                +avgPool3D' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
                                +              -> Tensor Build t -- ^ __output__: The average pooled output tensor.
                                +avgPool3D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "AvgPool3D"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The average pooled output tensor."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NDHWC" }
                                +  description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
                                +  allowed_values { list { s: "NDHWC" s: "NCDHW" } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Computes gradients of average pooling function.
                                +
                                +avgPool3DGrad :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +                 Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__: The original input dimensions.
                                +                 -> Tensor v'2 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
                                +                 -> Tensor Build t -- ^ __output__: The backprop for input.
                                +avgPool3DGrad = avgPool3DGrad' id
                                +avgPool3DGrad' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams ->
                                +                  Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__: The original input dimensions.
                                +                  -> Tensor v'2 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
                                +                  -> Tensor Build t -- ^ __output__: The backprop for input.
                                +avgPool3DGrad' op'options orig_input_shape grad | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_shape,
                                +                                                             buildInputs grad]
                                +        return (opDef "AvgPool3DGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "orig_input_shape"
                                +  description: "The original input dimensions."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The backprop for input."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NDHWC" }
                                +  description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
                                +  allowed_values { list { s: "NDHWC" s: "NCDHW" } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Computes gradients of the average pooling function.
                                +
                                +avgPoolGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double,
                                +                                           Float] t) => 
                                +               Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__: 1-D.  Shape of the original input to `avg_pool`.
                                +               -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
                                +                               -- the output of `avg_pool`.
                                +               -> Tensor Build t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `avg_pool`.
                                +avgPoolGrad = avgPoolGrad' id
                                +avgPoolGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double,
                                +                                            Float] t) => OpParams ->
                                +                Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__: 1-D.  Shape of the original input to `avg_pool`.
                                +                -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
                                +                                -- the output of `avg_pool`.
                                +                -> Tensor Build t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `avg_pool`.
                                +avgPoolGrad' op'options orig_input_shape grad | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_shape,
                                +                                                             buildInputs grad]
                                +        return (opDef "AvgPoolGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "orig_input_shape"
                                +  description: "1-D.  Shape of the original input to `avg_pool`."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.\nthe output of `avg_pool`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D.  Gradients w.r.t. the input of `avg_pool`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the sliding window for each dimension of the input."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Defines a barrier that persists across different graph executions.
                                +--
                                +-- A barrier represents a key-value map, where each key is a string, and
                                +-- each value is a tuple of tensors.
                                +-- 
                                +-- At runtime, the barrier contains 'complete' and 'incomplete'
                                +-- elements. A complete element has defined tensors for all components of
                                +-- its value tuple, and may be accessed using BarrierTakeMany. An
                                +-- incomplete element has some undefined components in its value tuple,
                                +-- and may be updated using BarrierInsertMany.
                                +barrier :: forall m' . (MonadBuild m') => 
                                +           [DataType] -- ^ __component_types__: The type of each component in a value.
                                +           -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the barrier.
                                +barrier = barrier' id
                                +barrier' :: forall m' . (MonadBuild m') => OpParams ->
                                +            [DataType] -- ^ __component_types__: The type of each component in a value.
                                +            -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the barrier.
                                +barrier' op'options component_types | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "Barrier"
                                +                    & opAttr "component_types" .~ component_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the barrier."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "The shape of each component in a value. Each shape must be 1 in the\nfirst dimension. The length of this attr must be the same as the length of\ncomponent_types."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The capacity of the barrier.  The default capacity is MAX_INT32,\nwhich is the largest capacity of the underlying queue."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this barrier is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this barrier will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | Closes the given barrier.
                                +--
                                +-- This operation signals that no more new elements will be inserted in the
                                +-- given barrier. Subsequent InsertMany that try to introduce a new key will fail.
                                +-- Subsequent InsertMany operations that just add missing components to already
                                +-- existing elements will continue to succeed. Subsequent TakeMany operations will
                                +-- continue to succeed if sufficient completed elements remain in the barrier.
                                +-- Subsequent TakeMany operations that would block will fail immediately.
                                +barrierClose :: forall m' . (MonadBuild m') => 
                                +                Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                -> m' (ControlNode)
                                +barrierClose = barrierClose' id
                                +barrierClose' :: forall m' . (MonadBuild m') => OpParams ->
                                +                 Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                 -> m' (ControlNode)
                                +barrierClose' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "BarrierClose"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a barrier."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "cancel_pending_enqueues"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, all pending enqueue requests that are\nblocked on the barrier\'s queue will be canceled. InsertMany will fail, even\nif no new key is introduced."
                                +}
                                +-}
                                +
                                +-- | Computes the number of incomplete elements in the given barrier.
                                +
                                +barrierIncompleteSize :: forall m' . (MonadBuild m') => 
                                +                         Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                         -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of incomplete elements (i.e. those with some of their value
                                +                         -- components not set) in the barrier.
                                +barrierIncompleteSize = barrierIncompleteSize' id
                                +barrierIncompleteSize' :: forall m' . (MonadBuild m') => OpParams ->
                                +                          Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                          -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of incomplete elements (i.e. those with some of their value
                                +                          -- components not set) in the barrier.
                                +barrierIncompleteSize' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "BarrierIncompleteSize"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a barrier."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "The number of incomplete elements (i.e. those with some of their value\ncomponents not set) in the barrier."
                                +  type: DT_INT32
                                +}
                                +-}
                                +
                                +-- | For each key, assigns the respective value to the specified component.
                                +--
                                +-- If a key is not found in the barrier, this operation will create a new
                                +-- incomplete element. If a key is found in the barrier, and the element
                                +-- already has a value at component_index, this operation will fail with
                                +-- INVALID_ARGUMENT, and leave the barrier in an undefined state.
                                +barrierInsertMany :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => 
                                +                     Data.Int.Int64 -- ^ __component_index__: The component of the barrier elements that is being assigned.
                                +                     -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                     -> Tensor v'2 Data.ByteString.ByteString -- ^ __keys__: A one-dimensional tensor of keys, with length n.
                                +                     -> Tensor v'3 t -- ^ __values__: An any-dimensional tensor of values, which are associated with the
                                +                                     -- respective keys. The 0th dimension must have length n.
                                +                     -> m' (ControlNode)
                                +barrierInsertMany = barrierInsertMany' id
                                +barrierInsertMany' :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) =>
                                +                      OpParams ->
                                +                      Data.Int.Int64 -- ^ __component_index__: The component of the barrier elements that is being assigned.
                                +                      -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                      -> Tensor v'2 Data.ByteString.ByteString -- ^ __keys__: A one-dimensional tensor of keys, with length n.
                                +                      -> Tensor v'3 t -- ^ __values__: An any-dimensional tensor of values, which are associated with the
                                +                                      -- respective keys. The 0th dimension must have length n.
                                +                      -> m' (ControlNode)
                                +barrierInsertMany' op'options component_index handle keys
                                +                   values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "BarrierInsertMany"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "component_index" .~ component_index
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a barrier."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "keys"
                                +  description: "A one-dimensional tensor of keys, with length n."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "An any-dimensional tensor of values, which are associated with the\nrespective keys. The 0th dimension must have length n."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "component_index"
                                +  type: "int"
                                +  description: "The component of the barrier elements that is being assigned."
                                +}
                                +-}
                                +
                                +-- | Computes the number of complete elements in the given barrier.
                                +
                                +barrierReadySize :: forall m' . (MonadBuild m') => 
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                    -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of complete elements (i.e. those with all of their value
                                +                    -- components set) in the barrier.
                                +barrierReadySize = barrierReadySize' id
                                +barrierReadySize' :: forall m' . (MonadBuild m') => OpParams ->
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                     -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of complete elements (i.e. those with all of their value
                                +                     -- components set) in the barrier.
                                +barrierReadySize' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "BarrierReadySize"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a barrier."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "The number of complete elements (i.e. those with all of their value\ncomponents set) in the barrier."
                                +  type: DT_INT32
                                +}
                                +-}
                                +
                                +-- | Takes the given number of completed elements from a barrier.
                                +--
                                +-- This operation concatenates completed-element component tensors along
                                +-- the 0th dimension to make a single component tensor.
                                +-- 
                                +-- Elements come out of the barrier when they are complete, and in the order
                                +-- in which they were placed into the barrier.  The indices output provides
                                +-- information about the batch in which each element was originally inserted
                                +-- into the barrier.
                                +barrierTakeMany :: forall v'2 component_types m' . (MonadBuild m',
                                +                                                    TensorTypes component_types) =>
                                +                   
                                +                   Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __num_elements__: A single-element tensor containing the number of elements to
                                +                                                -- take.
                                +                   -> m' ((Tensor Value Data.Int.Int64,
                                +                           Tensor Value Data.ByteString.ByteString,
                                +                           TensorList (Value) component_types))
                                +                   -- ^ (__indices__, __keys__, __values__)
                                +                   --
                                +                   -- * __indices__: A one-dimensional tensor of indices, with length num_elems.
                                +                   -- These indices refer to the batch in which the values were placed into the
                                +                   -- barrier (starting with MIN_LONG and increasing with each BarrierInsertMany).
                                +                   --
                                +                   -- * __keys__: A one-dimensional tensor of keys, with length num_elements.
                                +                   --
                                +                   -- * __values__: One any-dimensional tensor per component in a barrier element. All
                                +                   -- values have length num_elements in the 0th dimension.
                                +barrierTakeMany = barrierTakeMany' id
                                +barrierTakeMany' :: forall v'2 component_types m' . (MonadBuild m',
                                +                                                     TensorTypes component_types) =>
                                +                    OpParams ->
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __num_elements__: A single-element tensor containing the number of elements to
                                +                                                 -- take.
                                +                    -> m' ((Tensor Value Data.Int.Int64,
                                +                            Tensor Value Data.ByteString.ByteString,
                                +                            TensorList (Value) component_types))
                                +                    -- ^ (__indices__, __keys__, __values__)
                                +                    --
                                +                    -- * __indices__: A one-dimensional tensor of indices, with length num_elems.
                                +                    -- These indices refer to the batch in which the values were placed into the
                                +                    -- barrier (starting with MIN_LONG and increasing with each BarrierInsertMany).
                                +                    --
                                +                    -- * __keys__: A one-dimensional tensor of keys, with length num_elements.
                                +                    --
                                +                    -- * __values__: One any-dimensional tensor per component in a barrier element. All
                                +                    -- values have length num_elements in the 0th dimension.
                                +barrierTakeMany' op'options handle num_elements | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs num_elements]
                                +        buildOp [] (opDef "BarrierTakeMany"
                                +                    & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a barrier."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "num_elements"
                                +  description: "A single-element tensor containing the number of elements to\ntake."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "indices"
                                +  description: "A one-dimensional tensor of indices, with length num_elems.\nThese indices refer to the batch in which the values were placed into the\nbarrier (starting with MIN_LONG and increasing with each BarrierInsertMany)."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "keys"
                                +  description: "A one-dimensional tensor of keys, with length num_elements."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "values"
                                +  description: "One any-dimensional tensor per component in a barrier element. All\nvalues have length num_elements in the 0th dimension."
                                +  type_list_attr: "component_types"
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "allow_small_batch"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Allow to return less than num_elements items if barrier is\nalready closed."
                                +}
                                +attr {
                                +  name: "wait_for_incomplete" type: "bool" default_value { b: false }
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchCholesky :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +                 Tensor v'1 t -- ^ __input__
                                +                 -> Tensor Build t -- ^ __output__
                                +batchCholesky = batchCholesky' id
                                +batchCholesky' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __input__
                                +                  -> Tensor Build t -- ^ __output__
                                +batchCholesky' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchCholesky"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchCholeskyGrad :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +                     Tensor v'1 t -- ^ __l__
                                +                     -> Tensor v'2 t -- ^ __grad__
                                +                     -> Tensor Build t -- ^ __output__
                                +batchCholeskyGrad = batchCholeskyGrad' id
                                +batchCholeskyGrad' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) =>
                                +                      OpParams ->
                                +                      Tensor v'1 t -- ^ __l__
                                +                      -> Tensor v'2 t -- ^ __grad__
                                +                      -> Tensor Build t -- ^ __output__
                                +batchCholeskyGrad' op'options l grad | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs l,
                                +                                                             buildInputs grad]
                                +        return (opDef "BatchCholeskyGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "l" type_attr: "T" }
                                +input_arg { name: "grad" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that batches `batch_size` elements from `input_dataset`.
                                +
                                +batchDataset :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                [DataType] -- ^ __output_types__
                                +                -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__: A scalar representing the number of elements to accumulate in a
                                +                                             -- batch.
                                +                -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +batchDataset = batchDataset' id
                                +batchDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                 [DataType] -- ^ __output_types__
                                +                 -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                 -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__: A scalar representing the number of elements to accumulate in a
                                +                                              -- batch.
                                +                 -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +batchDataset' op'options output_types input_dataset
                                +              batch_size | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs batch_size]
                                +        buildOp [] (opDef "BatchDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +input_arg {
                                +  name: "batch_size"
                                +  description: "A scalar representing the number of elements to accumulate in a\nbatch."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchFFT :: 
                                +            Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +            -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchFFT = batchFFT' id
                                +batchFFT' :: OpParams ->
                                +             Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +             -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchFFT' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchFFT"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type: DT_COMPLEX64 }
                                +output_arg { name: "output" type: DT_COMPLEX64 }
                                +-}
                                +
                                +-- | 
                                +
                                +batchFFT2D :: 
                                +              Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +              -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchFFT2D = batchFFT2D' id
                                +batchFFT2D' :: OpParams ->
                                +               Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +               -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchFFT2D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchFFT2D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type: DT_COMPLEX64 }
                                +output_arg { name: "output" type: DT_COMPLEX64 }
                                +-}
                                +
                                +-- | 
                                +
                                +batchFFT3D :: 
                                +              Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +              -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchFFT3D = batchFFT3D' id
                                +batchFFT3D' :: OpParams ->
                                +               Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +               -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchFFT3D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchFFT3D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type: DT_COMPLEX64 }
                                +output_arg { name: "output" type: DT_COMPLEX64 }
                                +-}
                                +
                                +-- | 
                                +
                                +batchIFFT :: 
                                +             Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +             -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchIFFT = batchIFFT' id
                                +batchIFFT' :: OpParams ->
                                +              Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +              -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchIFFT' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchIFFT"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type: DT_COMPLEX64 }
                                +output_arg { name: "output" type: DT_COMPLEX64 }
                                +-}
                                +
                                +-- | 
                                +
                                +batchIFFT2D :: 
                                +               Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +               -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchIFFT2D = batchIFFT2D' id
                                +batchIFFT2D' :: OpParams ->
                                +                Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +                -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchIFFT2D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchIFFT2D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type: DT_COMPLEX64 }
                                +output_arg { name: "output" type: DT_COMPLEX64 }
                                +-}
                                +
                                +-- | 
                                +
                                +batchIFFT3D :: 
                                +               Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +               -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchIFFT3D = batchIFFT3D' id
                                +batchIFFT3D' :: OpParams ->
                                +                Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__
                                +                -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__
                                +batchIFFT3D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchIFFT3D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type: DT_COMPLEX64 }
                                +output_arg { name: "output" type: DT_COMPLEX64 }
                                +-}
                                +
                                +-- | Multiplies slices of two tensors in batches.
                                +--
                                +-- Multiplies all slices of `Tensor` `x` and `y` (each slice can be
                                +-- viewed as an element of a batch), and arranges the individual results
                                +-- in a single output tensor of the same batch size. Each of the
                                +-- individual slices can optionally be adjointed (to adjoint a matrix
                                +-- means to transpose and conjugate it) before multiplication by setting
                                +-- the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
                                +-- 
                                +-- The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
                                +-- and `[..., r_y, c_y]`.
                                +-- 
                                +-- The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
                                +-- 
                                +--     r_o = c_x if adj_x else r_x
                                +--     c_o = r_y if adj_y else c_y
                                +-- 
                                +-- It is computed as:
                                +-- 
                                +--     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
                                +batchMatMul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                           (Data.Complex.Complex Float),
                                +                                           Data.Int.Int32, Data.Word.Word16,
                                +                                           Double, Float] t) => 
                                +               Tensor v'1 t -- ^ __x__: 2-D or higher with shape `[..., r_x, c_x]`.
                                +               -> Tensor v'2 t -- ^ __y__: 2-D or higher with shape `[..., r_y, c_y]`.
                                +               -> Tensor Build t -- ^ __output__: 3-D or higher with shape `[..., r_o, c_o]`
                                +batchMatMul = batchMatMul' id
                                +batchMatMul' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Data.Int.Int32, Data.Word.Word16,
                                +                                            Double, Float] t) => OpParams ->
                                +                Tensor v'1 t -- ^ __x__: 2-D or higher with shape `[..., r_x, c_x]`.
                                +                -> Tensor v'2 t -- ^ __y__: 2-D or higher with shape `[..., r_y, c_y]`.
                                +                -> Tensor Build t -- ^ __output__: 3-D or higher with shape `[..., r_o, c_o]`
                                +batchMatMul' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "BatchMatMul"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "x"
                                +  description: "2-D or higher with shape `[..., r_x, c_x]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "y"
                                +  description: "2-D or higher with shape `[..., r_y, c_y]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "3-D or higher with shape `[..., r_o, c_o]`"
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "adj_x"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, adjoint the slices of `x`. Defaults to `False`."
                                +}
                                +attr {
                                +  name: "adj_y"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, adjoint the slices of `y`. Defaults to `False`."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixBandPart :: forall v'1 v'2 v'3 t . (TensorType t) => 
                                +                       Tensor v'1 t -- ^ __input__
                                +                       -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__
                                +                       -> Tensor Build t -- ^ __band__
                                +batchMatrixBandPart = batchMatrixBandPart' id
                                +batchMatrixBandPart' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams ->
                                +                        Tensor v'1 t -- ^ __input__
                                +                        -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__
                                +                        -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__
                                +                        -> Tensor Build t -- ^ __band__
                                +batchMatrixBandPart' op'options input num_lower num_upper | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs num_lower,
                                +                                                             buildInputs num_upper]
                                +        return (opDef "BatchMatrixBandPart"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg { name: "num_lower" type: DT_INT64 }
                                +input_arg { name: "num_upper" type: DT_INT64 }
                                +output_arg { name: "band" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixDeterminant :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                  (Data.Complex.Complex Float),
                                +                                                  Double, Float] t) => 
                                +                          Tensor v'1 t -- ^ __input__
                                +                          -> Tensor Build t -- ^ __output__
                                +batchMatrixDeterminant = batchMatrixDeterminant' id
                                +batchMatrixDeterminant' :: forall v'1
                                +                           t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float), Double,
                                +                                        Float] t) => OpParams ->
                                +                           Tensor v'1 t -- ^ __input__
                                +                           -> Tensor Build t -- ^ __output__
                                +batchMatrixDeterminant' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchMatrixDeterminant"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixDiag :: forall v'1 t . (TensorType t) => 
                                +                   Tensor v'1 t -- ^ __diagonal__
                                +                   -> Tensor Build t -- ^ __output__
                                +batchMatrixDiag = batchMatrixDiag' id
                                +batchMatrixDiag' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                    Tensor v'1 t -- ^ __diagonal__
                                +                    -> Tensor Build t -- ^ __output__
                                +batchMatrixDiag' op'options diagonal | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal]
                                +        return (opDef "BatchMatrixDiag"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "diagonal" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixDiagPart :: forall v'1 t . (TensorType t) => 
                                +                       Tensor v'1 t -- ^ __input__
                                +                       -> Tensor Build t -- ^ __diagonal__
                                +batchMatrixDiagPart = batchMatrixDiagPart' id
                                +batchMatrixDiagPart' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                        Tensor v'1 t -- ^ __input__
                                +                        -> Tensor Build t -- ^ __diagonal__
                                +batchMatrixDiagPart' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchMatrixDiagPart"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "diagonal" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixInverse :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +                      Tensor v'1 t -- ^ __input__
                                +                      -> Tensor Build t -- ^ __output__
                                +batchMatrixInverse = batchMatrixInverse' id
                                +batchMatrixInverse' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams ->
                                +                       Tensor v'1 t -- ^ __input__
                                +                       -> Tensor Build t -- ^ __output__
                                +batchMatrixInverse' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchMatrixInverse"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "adjoint" type: "bool" default_value { b: false } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixSetDiag :: forall v'1 v'2 t . (TensorType t) => 
                                +                      Tensor v'1 t -- ^ __input__
                                +                      -> Tensor v'2 t -- ^ __diagonal__
                                +                      -> Tensor Build t -- ^ __output__
                                +batchMatrixSetDiag = batchMatrixSetDiag' id
                                +batchMatrixSetDiag' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +                       Tensor v'1 t -- ^ __input__
                                +                       -> Tensor v'2 t -- ^ __diagonal__
                                +                       -> Tensor Build t -- ^ __output__
                                +batchMatrixSetDiag' op'options input diagonal | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs diagonal]
                                +        return (opDef "BatchMatrixSetDiag"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg { name: "diagonal" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixSolve :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +                    Tensor v'1 t -- ^ __matrix__
                                +                    -> Tensor v'2 t -- ^ __rhs__
                                +                    -> Tensor Build t -- ^ __output__
                                +batchMatrixSolve = batchMatrixSolve' id
                                +batchMatrixSolve' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) =>
                                +                     OpParams ->
                                +                     Tensor v'1 t -- ^ __matrix__
                                +                     -> Tensor v'2 t -- ^ __rhs__
                                +                     -> Tensor Build t -- ^ __output__
                                +batchMatrixSolve' op'options matrix rhs | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix,
                                +                                                             buildInputs rhs]
                                +        return (opDef "BatchMatrixSolve"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "matrix" type_attr: "T" }
                                +input_arg { name: "rhs" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "adjoint" type: "bool" default_value { b: false } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixSolveLs :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +                      Tensor v'1 t -- ^ __matrix__
                                +                      -> Tensor v'2 t -- ^ __rhs__
                                +                      -> Tensor v'3 Double -- ^ __l2_regularizer__
                                +                      -> Tensor Build t -- ^ __output__
                                +batchMatrixSolveLs = batchMatrixSolveLs' id
                                +batchMatrixSolveLs' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) =>
                                +                       OpParams ->
                                +                       Tensor v'1 t -- ^ __matrix__
                                +                       -> Tensor v'2 t -- ^ __rhs__
                                +                       -> Tensor v'3 Double -- ^ __l2_regularizer__
                                +                       -> Tensor Build t -- ^ __output__
                                +batchMatrixSolveLs' op'options matrix rhs l2_regularizer | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix,
                                +                                                             buildInputs rhs,
                                +                                                             buildInputs l2_regularizer]
                                +        return (opDef "BatchMatrixSolveLs"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "matrix" type_attr: "T" }
                                +input_arg { name: "rhs" type_attr: "T" }
                                +input_arg { name: "l2_regularizer" type: DT_DOUBLE }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +attr { name: "fast" type: "bool" default_value { b: true } }
                                +-}
                                +
                                +-- | 
                                +
                                +batchMatrixTriangularSolve :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +                              Tensor v'1 t -- ^ __matrix__
                                +                              -> Tensor v'2 t -- ^ __rhs__
                                +                              -> Tensor Build t -- ^ __output__
                                +batchMatrixTriangularSolve = batchMatrixTriangularSolve' id
                                +batchMatrixTriangularSolve' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) =>
                                +                               OpParams ->
                                +                               Tensor v'1 t -- ^ __matrix__
                                +                               -> Tensor v'2 t -- ^ __rhs__
                                +                               -> Tensor Build t -- ^ __output__
                                +batchMatrixTriangularSolve' op'options matrix rhs | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix,
                                +                                                             buildInputs rhs]
                                +        return (opDef "BatchMatrixTriangularSolve"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "matrix" type_attr: "T" }
                                +input_arg { name: "rhs" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "lower" type: "bool" default_value { b: true } }
                                +attr { name: "adjoint" type: "bool" default_value { b: false } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | Batch normalization.
                                +--
                                +-- This op is deprecated. Prefer `tf.nn.batch_normalization`.
                                +batchNormWithGlobalNormalization :: forall v'1 v'2 v'3 v'4 v'5
                                +                                    t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                 (Data.Complex.Complex Float),
                                +                                                 Data.Int.Int16, Data.Int.Int32,
                                +                                                 Data.Int.Int64, Data.Int.Int8,
                                +                                                 Data.Word.Word16,
                                +                                                 Data.Word.Word8, Double,
                                +                                                 Float] t) => 
                                +                                    Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor
                                +                                         -- needs to be multiplied with gamma.
                                +                                    -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
                                +                                    -> Tensor v'1 t -- ^ __t__: A 4D input Tensor.
                                +                                    -> Tensor v'2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
                                +                                                    -- This is the first output from tf.nn.moments,
                                +                                                    -- or a saved moving average thereof.
                                +                                    -> Tensor v'3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
                                +                                                    -- This is the second output from tf.nn.moments,
                                +                                                    -- or a saved moving average thereof.
                                +                                    -> Tensor v'4 t -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.
                                +                                                    -- An offset to be added to the normalized tensor.
                                +                                    -> Tensor v'5 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
                                +                                                    -- If "scale_after_normalization" is true, this tensor will be multiplied
                                +                                                    -- with the normalized tensor.
                                +                                    -> Tensor Build t -- ^ __result__
                                +batchNormWithGlobalNormalization = batchNormWithGlobalNormalization' id
                                +batchNormWithGlobalNormalization' :: forall v'1 v'2 v'3 v'4 v'5
                                +                                     t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                  (Data.Complex.Complex Float),
                                +                                                  Data.Int.Int16,
                                +                                                  Data.Int.Int32,
                                +                                                  Data.Int.Int64, Data.Int.Int8,
                                +                                                  Data.Word.Word16,
                                +                                                  Data.Word.Word8, Double,
                                +                                                  Float] t) => OpParams ->
                                +                                     Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor
                                +                                          -- needs to be multiplied with gamma.
                                +                                     -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
                                +                                     -> Tensor v'1 t -- ^ __t__: A 4D input Tensor.
                                +                                     -> Tensor v'2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
                                +                                                     -- This is the first output from tf.nn.moments,
                                +                                                     -- or a saved moving average thereof.
                                +                                     -> Tensor v'3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
                                +                                                     -- This is the second output from tf.nn.moments,
                                +                                                     -- or a saved moving average thereof.
                                +                                     -> Tensor v'4 t -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.
                                +                                                     -- An offset to be added to the normalized tensor.
                                +                                     -> Tensor v'5 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
                                +                                                     -- If "scale_after_normalization" is true, this tensor will be multiplied
                                +                                                     -- with the normalized tensor.
                                +                                     -> Tensor Build t -- ^ __result__
                                +batchNormWithGlobalNormalization' op'options scale_after_normalization
                                +                                  variance_epsilon t m v beta
                                +                                  gamma | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t,
                                +                                                             buildInputs m,
                                +                                                             buildInputs v,
                                +                                                             buildInputs beta,
                                +                                                             buildInputs gamma]
                                +        return (opDef "BatchNormWithGlobalNormalization"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "scale_after_normalization" .~ scale_after_normalization
                                +                & opAttr "variance_epsilon" .~ variance_epsilon
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "t" description: "A 4D input Tensor." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "m"
                                +  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "v"
                                +  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "beta"
                                +  description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "gamma"
                                +  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "result" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "variance_epsilon"
                                +  type: "float"
                                +  description: "A small float number to avoid dividing by 0."
                                +}
                                +attr {
                                +  name: "scale_after_normalization"
                                +  type: "bool"
                                +  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
                                +}
                                +-}
                                +
                                +-- | Gradients for batch normalization.
                                +--
                                +-- This op is deprecated. See `tf.nn.batch_normalization`.
                                +batchNormWithGlobalNormalizationGrad :: forall v'1 v'2 v'3 v'4 v'5
                                +                                        t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                     (Data.Complex.Complex Float),
                                +                                                     Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t) => 
                                +                                        Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor
                                +                                             -- needs to be multiplied with gamma.
                                +                                        -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
                                +                                        -> Tensor v'1 t -- ^ __t__: A 4D input Tensor.
                                +                                        -> Tensor v'2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
                                +                                                        -- This is the first output from tf.nn.moments,
                                +                                                        -- or a saved moving average thereof.
                                +                                        -> Tensor v'3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
                                +                                                        -- This is the second output from tf.nn.moments,
                                +                                                        -- or a saved moving average thereof.
                                +                                        -> Tensor v'4 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
                                +                                                        -- If "scale_after_normalization" is true, this Tensor will be multiplied
                                +                                                        -- with the normalized Tensor.
                                +                                        -> Tensor v'5 t -- ^ __backprop__: 4D backprop Tensor.
                                +                                        -> (Tensor Build t, Tensor Build t,
                                +                                            Tensor Build t, Tensor Build t,
                                +                                            Tensor Build t)
                                +                                        -- ^ (__dx__, __dm__, __dv__, __db__, __dg__)
                                +                                        --
                                +                                        -- * __dx__: 4D backprop tensor for input.
                                +                                        --
                                +                                        -- * __dm__: 1D backprop tensor for mean.
                                +                                        --
                                +                                        -- * __dv__: 1D backprop tensor for variance.
                                +                                        --
                                +                                        -- * __db__: 1D backprop tensor for beta.
                                +                                        --
                                +                                        -- * __dg__: 1D backprop tensor for gamma.
                                +batchNormWithGlobalNormalizationGrad = batchNormWithGlobalNormalizationGrad' id
                                +batchNormWithGlobalNormalizationGrad' :: forall v'1 v'2 v'3 v'4 v'5
                                +                                         t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                      (Data.Complex.Complex Float),
                                +                                                      Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t) => OpParams ->
                                +                                         Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor
                                +                                              -- needs to be multiplied with gamma.
                                +                                         -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
                                +                                         -> Tensor v'1 t -- ^ __t__: A 4D input Tensor.
                                +                                         -> Tensor v'2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
                                +                                                         -- This is the first output from tf.nn.moments,
                                +                                                         -- or a saved moving average thereof.
                                +                                         -> Tensor v'3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
                                +                                                         -- This is the second output from tf.nn.moments,
                                +                                                         -- or a saved moving average thereof.
                                +                                         -> Tensor v'4 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
                                +                                                         -- If "scale_after_normalization" is true, this Tensor will be multiplied
                                +                                                         -- with the normalized Tensor.
                                +                                         -> Tensor v'5 t -- ^ __backprop__: 4D backprop Tensor.
                                +                                         -> (Tensor Build t, Tensor Build t,
                                +                                             Tensor Build t, Tensor Build t,
                                +                                             Tensor Build t)
                                +                                         -- ^ (__dx__, __dm__, __dv__, __db__, __dg__)
                                +                                         --
                                +                                         -- * __dx__: 4D backprop tensor for input.
                                +                                         --
                                +                                         -- * __dm__: 1D backprop tensor for mean.
                                +                                         --
                                +                                         -- * __dv__: 1D backprop tensor for variance.
                                +                                         --
                                +                                         -- * __db__: 1D backprop tensor for beta.
                                +                                         --
                                +                                         -- * __dg__: 1D backprop tensor for gamma.
                                +batchNormWithGlobalNormalizationGrad' op'options scale_after_normalization
                                +                                      variance_epsilon t m v gamma
                                +                                      backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t,
                                +                                                             buildInputs m,
                                +                                                             buildInputs v,
                                +                                                             buildInputs gamma,
                                +                                                             buildInputs backprop]
                                +        return (opDef "BatchNormWithGlobalNormalizationGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "scale_after_normalization" .~ scale_after_normalization
                                +                & opAttr "variance_epsilon" .~ variance_epsilon
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "t" description: "A 4D input Tensor." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "m"
                                +  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "v"
                                +  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "gamma"
                                +  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this Tensor will be multiplied\nwith the normalized Tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "backprop" description: "4D backprop Tensor." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "dx"
                                +  description: "4D backprop tensor for input."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "dm"
                                +  description: "1D backprop tensor for mean."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "dv"
                                +  description: "1D backprop tensor for variance."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "db"
                                +  description: "1D backprop tensor for beta."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "dg"
                                +  description: "1D backprop tensor for gamma."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "variance_epsilon"
                                +  type: "float"
                                +  description: "A small float number to avoid dividing by 0."
                                +}
                                +attr {
                                +  name: "scale_after_normalization"
                                +  type: "bool"
                                +  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchSelfAdjointEig :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +                       Tensor v'1 t -- ^ __input__
                                +                       -> Tensor Build t -- ^ __output__
                                +batchSelfAdjointEig = batchSelfAdjointEig' id
                                +batchSelfAdjointEig' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams ->
                                +                        Tensor v'1 t -- ^ __input__
                                +                        -> Tensor Build t -- ^ __output__
                                +batchSelfAdjointEig' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchSelfAdjointEig"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchSelfAdjointEigV2 :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +                         Tensor v'1 t -- ^ __input__
                                +                         -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__)
                                +                         --
                                +                         -- * __e__
                                +                         --
                                +                         -- * __v__
                                +batchSelfAdjointEigV2 = batchSelfAdjointEigV2' id
                                +batchSelfAdjointEigV2' :: forall v'1 t . (OneOf '[Double, Float] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 t -- ^ __input__
                                +                          -> (Tensor Build t, Tensor Build t)
                                +                          -- ^ (__e__, __v__)
                                +                          --
                                +                          -- * __e__
                                +                          --
                                +                          -- * __v__
                                +batchSelfAdjointEigV2' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchSelfAdjointEigV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "e" type_attr: "T" }
                                +output_arg { name: "v" type_attr: "T" }
                                +attr { name: "compute_v" type: "bool" default_value { b: true } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +batchSvd :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float), Double,
                                +                                    Float] t) => 
                                +            Tensor v'1 t -- ^ __input__
                                +            -> (Tensor Build t, Tensor Build t, Tensor Build t)
                                +            -- ^ (__s__, __u__, __v__)
                                +            --
                                +            -- * __s__
                                +            --
                                +            -- * __u__
                                +            --
                                +            -- * __v__
                                +batchSvd = batchSvd' id
                                +batchSvd' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float), Double,
                                +                                     Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __input__
                                +             -> (Tensor Build t, Tensor Build t, Tensor Build t)
                                +             -- ^ (__s__, __u__, __v__)
                                +             --
                                +             -- * __s__
                                +             --
                                +             -- * __u__
                                +             --
                                +             -- * __v__
                                +batchSvd' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "BatchSvd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "s" type_attr: "T" }
                                +output_arg { name: "u" type_attr: "T" }
                                +output_arg { name: "v" type_attr: "T" }
                                +attr { name: "compute_uv" type: "bool" default_value { b: true } }
                                +attr {
                                +  name: "full_matrices" type: "bool" default_value { b: false }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_DOUBLE
                                +      type: DT_FLOAT
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | BatchToSpace for 4-D tensors of type T.
                                +--
                                +-- This is a legacy version of the more general BatchToSpaceND.
                                +-- 
                                +-- Rearranges (permutes) data from batch into blocks of spatial data, followed by
                                +-- cropping. This is the reverse transformation of SpaceToBatch. More specifically,
                                +-- this op outputs a copy of the input tensor where values from the `batch`
                                +-- dimension are moved in spatial blocks to the `height` and `width` dimensions,
                                +-- followed by cropping along the `height` and `width` dimensions.
                                +batchToSpace :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64] tidx) =>
                                +                
                                +                Data.Int.Int64 -- ^ __block_size__
                                +                -> Tensor v'1 t -- ^ __input__: 4-D tensor with shape
                                +                                -- `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
                                +                                --   depth]`. Note that the batch size of the input tensor must be divisible by
                                +                                -- `block_size * block_size`.
                                +                -> Tensor v'2 tidx -- ^ __crops__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
                                +                                   -- how many elements to crop from the intermediate result across the spatial
                                +                                   -- dimensions as follows:
                                +                                   -- 
                                +                                   --     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
                                +                -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, height, width, depth]`, where:
                                +                -- 
                                +                --       height = height_pad - crop_top - crop_bottom
                                +                --       width = width_pad - crop_left - crop_right
                                +                -- 
                                +                -- The attr `block_size` must be greater than one. It indicates the block size.
                                +                -- 
                                +                -- Some examples:
                                +                -- 
                                +                -- (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
                                +                -- 
                                +                -- ```
                                +                -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
                                +                -- ```
                                +                -- 
                                +                -- The output tensor has shape `[1, 2, 2, 1]` and value:
                                +                -- 
                                +                -- ```
                                +                -- x = [[[[1], [2]], [[3], [4]]]]
                                +                -- ```
                                +                -- 
                                +                -- (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
                                +                -- 
                                +                -- ```
                                +                -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
                                +                -- ```
                                +                -- 
                                +                -- The output tensor has shape `[1, 2, 2, 3]` and value:
                                +                -- 
                                +                -- ```
                                +                -- x = [[[[1, 2, 3], [4, 5, 6]],
                                +                --       [[7, 8, 9], [10, 11, 12]]]]
                                +                -- ```
                                +                -- 
                                +                -- (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
                                +                -- 
                                +                -- ```
                                +                -- x = [[[[1], [3]], [[9], [11]]],
                                +                --      [[[2], [4]], [[10], [12]]],
                                +                --      [[[5], [7]], [[13], [15]]],
                                +                --      [[[6], [8]], [[14], [16]]]]
                                +                -- ```
                                +                -- 
                                +                -- The output tensor has shape `[1, 4, 4, 1]` and value:
                                +                -- 
                                +                -- ```
                                +                -- x = [[[1],   [2],  [3],  [4]],
                                +                --      [[5],   [6],  [7],  [8]],
                                +                --      [[9],  [10], [11],  [12]],
                                +                --      [[13], [14], [15],  [16]]]
                                +                -- ```
                                +                -- 
                                +                -- (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
                                +                -- 
                                +                -- ```
                                +                -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
                                +                --      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
                                +                -- ```
                                +                -- 
                                +                -- The output tensor has shape `[2, 2, 4, 1]` and value:
                                +                -- 
                                +                -- ```
                                +                -- x = [[[[1], [3]], [[5], [7]]],
                                +                --      [[[2], [4]], [[10], [12]]],
                                +                --      [[[5], [7]], [[13], [15]]],
                                +                --      [[[6], [8]], [[14], [16]]]]
                                +                -- ```
                                +batchToSpace = batchToSpace' id
                                +batchToSpace' :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                                Data.Int.Int64] tidx) =>
                                +                 OpParams ->
                                +                 Data.Int.Int64 -- ^ __block_size__
                                +                 -> Tensor v'1 t -- ^ __input__: 4-D tensor with shape
                                +                                 -- `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
                                +                                 --   depth]`. Note that the batch size of the input tensor must be divisible by
                                +                                 -- `block_size * block_size`.
                                +                 -> Tensor v'2 tidx -- ^ __crops__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
                                +                                    -- how many elements to crop from the intermediate result across the spatial
                                +                                    -- dimensions as follows:
                                +                                    -- 
                                +                                    --     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
                                +                 -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, height, width, depth]`, where:
                                +                 -- 
                                +                 --       height = height_pad - crop_top - crop_bottom
                                +                 --       width = width_pad - crop_left - crop_right
                                +                 -- 
                                +                 -- The attr `block_size` must be greater than one. It indicates the block size.
                                +                 -- 
                                +                 -- Some examples:
                                +                 -- 
                                +                 -- (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
                                +                 -- 
                                +                 -- ```
                                +                 -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
                                +                 -- ```
                                +                 -- 
                                +                 -- The output tensor has shape `[1, 2, 2, 1]` and value:
                                +                 -- 
                                +                 -- ```
                                +                 -- x = [[[[1], [2]], [[3], [4]]]]
                                +                 -- ```
                                +                 -- 
                                +                 -- (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
                                +                 -- 
                                +                 -- ```
                                +                 -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
                                +                 -- ```
                                +                 -- 
                                +                 -- The output tensor has shape `[1, 2, 2, 3]` and value:
                                +                 -- 
                                +                 -- ```
                                +                 -- x = [[[[1, 2, 3], [4, 5, 6]],
                                +                 --       [[7, 8, 9], [10, 11, 12]]]]
                                +                 -- ```
                                +                 -- 
                                +                 -- (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
                                +                 -- 
                                +                 -- ```
                                +                 -- x = [[[[1], [3]], [[9], [11]]],
                                +                 --      [[[2], [4]], [[10], [12]]],
                                +                 --      [[[5], [7]], [[13], [15]]],
                                +                 --      [[[6], [8]], [[14], [16]]]]
                                +                 -- ```
                                +                 -- 
                                +                 -- The output tensor has shape `[1, 4, 4, 1]` and value:
                                +                 -- 
                                +                 -- ```
                                +                 -- x = [[[1],   [2],  [3],  [4]],
                                +                 --      [[5],   [6],  [7],  [8]],
                                +                 --      [[9],  [10], [11],  [12]],
                                +                 --      [[13], [14], [15],  [16]]]
                                +                 -- ```
                                +                 -- 
                                +                 -- (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
                                +                 -- 
                                +                 -- ```
                                +                 -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
                                +                 --      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
                                +                 -- ```
                                +                 -- 
                                +                 -- The output tensor has shape `[2, 2, 4, 1]` and value:
                                +                 -- 
                                +                 -- ```
                                +                 -- x = [[[[1], [3]], [[5], [7]]],
                                +                 --      [[[2], [4]], [[10], [12]]],
                                +                 --      [[[5], [7]], [[13], [15]]],
                                +                 --      [[[6], [8]], [[14], [16]]]]
                                +                 -- ```
                                +batchToSpace' op'options block_size input crops | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs crops]
                                +        return (opDef "BatchToSpace"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & opAttr "block_size" .~ block_size
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D tensor with shape\n`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n  depth]`. Note that the batch size of the input tensor must be divisible by\n`block_size * block_size`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "crops"
                                +  description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\nhow many elements to crop from the intermediate result across the spatial\ndimensions as follows:\n\n    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]"
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with shape `[batch, height, width, depth]`, where:\n\n      height = height_pad - crop_top - crop_bottom\n      width = width_pad - crop_left - crop_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:\n\n```\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```"
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "block_size" type: "int" has_minimum: true minimum: 2
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | BatchToSpace for N-D tensors of type T.
                                +--
                                +-- This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
                                +-- `block_shape + [batch]`, interleaves these blocks back into the grid defined by
                                +-- the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
                                +-- the input.  The spatial dimensions of this intermediate result are then
                                +-- optionally cropped according to `crops` to produce the output.  This is the
                                +-- reverse of SpaceToBatch.  See below for a precise description.
                                +batchToSpaceND :: forall v'1 v'2 v'3 t tblock_shape tcrops . (TensorType t,
                                +                                                              OneOf '[Data.Int.Int32,
                                +                                                                      Data.Int.Int64] tblock_shape,
                                +                                                              OneOf '[Data.Int.Int32,
                                +                                                                      Data.Int.Int64] tcrops) =>
                                +                  
                                +                  Tensor v'1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
                                +                               -- where spatial_shape has M dimensions.
                                +                  -> Tensor v'2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1.
                                +                  -> Tensor v'3 tcrops -- ^ __crops__: 2-D with shape `[M, 2]`, all values must be >= 0.
                                +                                       --   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
                                +                                       --   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
                                +                                       --   required that
                                +                                       --   `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
                                +                                       -- 
                                +                                       -- This operation is equivalent to the following steps:
                                +                                       -- 
                                +                                       -- 1. Reshape `input` to `reshaped` of shape:
                                +                                       --      [block_shape[0], ..., block_shape[M-1],
                                +                                       --       batch / prod(block_shape),
                                +                                       --       input_shape[1], ..., input_shape[N-1]]
                                +                                       -- 
                                +                                       -- 2. Permute dimensions of `reshaped` to produce `permuted` of shape
                                +                                       --      [batch / prod(block_shape),
                                +                                       -- 
                                +                                       --       input_shape[1], block_shape[0],
                                +                                       --       ...,
                                +                                       --       input_shape[M], block_shape[M-1],
                                +                                       -- 
                                +                                       --       input_shape[M+1], ..., input_shape[N-1]]
                                +                                       -- 
                                +                                       -- 3. Reshape `permuted` to produce `reshaped_permuted` of shape
                                +                                       --      [batch / prod(block_shape),
                                +                                       -- 
                                +                                       --       input_shape[1] * block_shape[0],
                                +                                       --       ...,
                                +                                       --       input_shape[M] * block_shape[M-1],
                                +                                       -- 
                                +                                       --       input_shape[M+1],
                                +                                       --       ...,
                                +                                       --       input_shape[N-1]]
                                +                                       -- 
                                +                                       -- 4. Crop the start and end of dimensions `[1, ..., M]` of
                                +                                       --    `reshaped_permuted` according to `crops` to produce the output of shape:
                                +                                       --      [batch / prod(block_shape),
                                +                                       -- 
                                +                                       --       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
                                +                                       --       ...,
                                +                                       --       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
                                +                                       -- 
                                +                                       --       input_shape[M+1], ..., input_shape[N-1]]
                                +                                       -- 
                                +                                       -- Some examples:
                                +                                       -- 
                                +                                       -- (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
                                +                                       --     `crops = [[0, 0], [0, 0]]`:
                                +                                       -- 
                                +                                       -- ```
                                +                                       -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
                                +                                       -- ```
                                +                                       -- 
                                +                                       -- The output tensor has shape `[1, 2, 2, 1]` and value:
                                +                                       -- 
                                +                                       -- ```
                                +                                       -- x = [[[[1], [2]], [[3], [4]]]]
                                +                                       -- ```
                                +                                       -- 
                                +                                       -- (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
                                +                                       --     `crops = [[0, 0], [0, 0]]`:
                                +                                       -- 
                                +                                       -- ```
                                +                                       -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
                                +                                       -- ```
                                +                                       -- 
                                +                                       -- The output tensor has shape `[1, 2, 2, 3]` and value:
                                +                                       -- 
                                +                                       -- ```
                                +                                       -- x = [[[[1, 2, 3], [4, 5, 6]],
                                +                                       --       [[7, 8, 9], [10, 11, 12]]]]
                                +                                       -- ```
                                +                                       -- 
                                +                                       -- (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
                                +                                       --     `crops = [[0, 0], [0, 0]]`:
                                +                                       -- 
                                +                                       -- ```
                                +                                       -- x = [[[[1], [3]], [[9], [11]]],
                                +                                       --      [[[2], [4]], [[10], [12]]],
                                +                                       --      [[[5], [7]], [[13], [15]]],
                                +                                       --      [[[6], [8]], [[14], [16]]]]
                                +                                       -- ```
                                +                                       -- 
                                +                                       -- The output tensor has shape `[1, 4, 4, 1]` and value:
                                +                                       -- 
                                +                                       -- ```
                                +                                       -- x = [[[1],   [2],  [3],  [4]],
                                +                                       --      [[5],   [6],  [7],  [8]],
                                +                                       --      [[9],  [10], [11],  [12]],
                                +                                       --      [[13], [14], [15],  [16]]]
                                +                                       -- ```
                                +                                       -- 
                                +                                       -- (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
                                +                                       --     `crops = [[0, 0], [2, 0]]`:
                                +                                       -- 
                                +                                       -- ```
                                +                                       -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
                                +                                       --      [[[0], [2], [4]]], [[[0], [10], [12]]],
                                +                                       --      [[[0], [5], [7]]], [[[0], [13], [15]]],
                                +                                       --      [[[0], [6], [8]]], [[[0], [14], [16]]]]
                                +                                       -- ```
                                +                                       -- 
                                +                                       -- The output tensor has shape `[2, 2, 4, 1]` and value:
                                +                                       -- 
                                +                                       -- ```
                                +                                       -- x = [[[[1],   [2],  [3],  [4]],
                                +                                       --       [[5],   [6],  [7],  [8]]],
                                +                                       --      [[[9],  [10], [11],  [12]],
                                +                                       --       [[13], [14], [15],  [16]]]]
                                +                                       -- ```
                                +                  -> Tensor Build t -- ^ __output__
                                +batchToSpaceND = batchToSpaceND' id
                                +batchToSpaceND' :: forall v'1 v'2 v'3 t tblock_shape tcrops . (TensorType t,
                                +                                                               OneOf '[Data.Int.Int32,
                                +                                                                       Data.Int.Int64] tblock_shape,
                                +                                                               OneOf '[Data.Int.Int32,
                                +                                                                       Data.Int.Int64] tcrops) =>
                                +                   OpParams ->
                                +                   Tensor v'1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
                                +                                -- where spatial_shape has M dimensions.
                                +                   -> Tensor v'2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1.
                                +                   -> Tensor v'3 tcrops -- ^ __crops__: 2-D with shape `[M, 2]`, all values must be >= 0.
                                +                                        --   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
                                +                                        --   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
                                +                                        --   required that
                                +                                        --   `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
                                +                                        -- 
                                +                                        -- This operation is equivalent to the following steps:
                                +                                        -- 
                                +                                        -- 1. Reshape `input` to `reshaped` of shape:
                                +                                        --      [block_shape[0], ..., block_shape[M-1],
                                +                                        --       batch / prod(block_shape),
                                +                                        --       input_shape[1], ..., input_shape[N-1]]
                                +                                        -- 
                                +                                        -- 2. Permute dimensions of `reshaped` to produce `permuted` of shape
                                +                                        --      [batch / prod(block_shape),
                                +                                        -- 
                                +                                        --       input_shape[1], block_shape[0],
                                +                                        --       ...,
                                +                                        --       input_shape[M], block_shape[M-1],
                                +                                        -- 
                                +                                        --       input_shape[M+1], ..., input_shape[N-1]]
                                +                                        -- 
                                +                                        -- 3. Reshape `permuted` to produce `reshaped_permuted` of shape
                                +                                        --      [batch / prod(block_shape),
                                +                                        -- 
                                +                                        --       input_shape[1] * block_shape[0],
                                +                                        --       ...,
                                +                                        --       input_shape[M] * block_shape[M-1],
                                +                                        -- 
                                +                                        --       input_shape[M+1],
                                +                                        --       ...,
                                +                                        --       input_shape[N-1]]
                                +                                        -- 
                                +                                        -- 4. Crop the start and end of dimensions `[1, ..., M]` of
                                +                                        --    `reshaped_permuted` according to `crops` to produce the output of shape:
                                +                                        --      [batch / prod(block_shape),
                                +                                        -- 
                                +                                        --       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
                                +                                        --       ...,
                                +                                        --       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
                                +                                        -- 
                                +                                        --       input_shape[M+1], ..., input_shape[N-1]]
                                +                                        -- 
                                +                                        -- Some examples:
                                +                                        -- 
                                +                                        -- (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
                                +                                        --     `crops = [[0, 0], [0, 0]]`:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- The output tensor has shape `[1, 2, 2, 1]` and value:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1], [2]], [[3], [4]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
                                +                                        --     `crops = [[0, 0], [0, 0]]`:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- The output tensor has shape `[1, 2, 2, 3]` and value:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1, 2, 3], [4, 5, 6]],
                                +                                        --       [[7, 8, 9], [10, 11, 12]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
                                +                                        --     `crops = [[0, 0], [0, 0]]`:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1], [3]], [[9], [11]]],
                                +                                        --      [[[2], [4]], [[10], [12]]],
                                +                                        --      [[[5], [7]], [[13], [15]]],
                                +                                        --      [[[6], [8]], [[14], [16]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- The output tensor has shape `[1, 4, 4, 1]` and value:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[1],   [2],  [3],  [4]],
                                +                                        --      [[5],   [6],  [7],  [8]],
                                +                                        --      [[9],  [10], [11],  [12]],
                                +                                        --      [[13], [14], [15],  [16]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
                                +                                        --     `crops = [[0, 0], [2, 0]]`:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
                                +                                        --      [[[0], [2], [4]]], [[[0], [10], [12]]],
                                +                                        --      [[[0], [5], [7]]], [[[0], [13], [15]]],
                                +                                        --      [[[0], [6], [8]]], [[[0], [14], [16]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- The output tensor has shape `[2, 2, 4, 1]` and value:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1],   [2],  [3],  [4]],
                                +                                        --       [[5],   [6],  [7],  [8]]],
                                +                                        --      [[[9],  [10], [11],  [12]],
                                +                                        --       [[13], [14], [15],  [16]]]]
                                +                                        -- ```
                                +                   -> Tensor Build t -- ^ __output__
                                +batchToSpaceND' op'options input block_shape crops | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs block_shape,
                                +                                                             buildInputs crops]
                                +        return (opDef "BatchToSpaceND"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape)
                                +                & opAttr "Tcrops" .~ tensorType (undefined :: tcrops)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has M dimensions."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "block_shape"
                                +  description: "1-D with shape `[M]`, all values must be >= 1."
                                +  type_attr: "Tblock_shape"
                                +}
                                +input_arg {
                                +  name: "crops"
                                +  description: "2-D with shape `[M, 2]`, all values must be >= 0.\n  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\n  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is\n  required that\n  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n\nThis operation is equivalent to the following steps:\n\n1. Reshape `input` to `reshaped` of shape:\n     [block_shape[0], ..., block_shape[M-1],\n      batch / prod(block_shape),\n      input_shape[1], ..., input_shape[N-1]]\n\n2. Permute dimensions of `reshaped` to produce `permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1], block_shape[0],\n      ...,\n      input_shape[M], block_shape[M-1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\n3. Reshape `permuted` to produce `reshaped_permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0],\n      ...,\n      input_shape[M] * block_shape[M-1],\n\n      input_shape[M+1],\n      ...,\n      input_shape[N-1]]\n\n4. 
Crop the start and end of dimensions `[1, ..., M]` of\n   `reshaped_permuted` according to `crops` to produce the output of shape:\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n      ...,\n      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [2, 0]]`:\n\n```\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```"
                                +  type_attr: "Tcrops"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tblock_shape"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "Tcrops"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
                                +--
                                +-- The regularized incomplete beta integral is defined as:
                                +-- 
                                +-- 
                                +-- \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
                                +-- 
                                +-- where
                                +-- 
                                +-- 
                                +-- \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
                                +-- 
                                +-- 
                                +-- is the incomplete beta function and \\(B(a, b)\\) is the *complete*
                                +-- beta function.
                                +betainc :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __a__
                                +           -> Tensor v'2 t -- ^ __b__
                                +           -> Tensor v'3 t -- ^ __x__
                                +           -> Tensor Build t -- ^ __z__
                                +betainc = betainc' id
                                +betainc' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams ->
                                +            Tensor v'1 t -- ^ __a__
                                +            -> Tensor v'2 t -- ^ __b__
                                +            -> Tensor v'3 t -- ^ __x__
                                +            -> Tensor Build t -- ^ __z__
                                +betainc' op'options a b x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a,
                                +                                                             buildInputs b,
                                +                                                             buildInputs x]
                                +        return (opDef "Betainc"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "a" type_attr: "T" }
                                +input_arg { name: "b" type_attr: "T" }
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Adds `bias` to `value`.
                                +--
                                +-- This is a special case of `tf.add` where `bias` is restricted to be 1-D.
                                +-- Broadcasting is supported, so `value` may have any number of dimensions.
                                +biasAdd :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __value__: Any number of dimensions.
                                +           -> Tensor v'2 t -- ^ __bias__: 1-D with size the last dimension of `value`.
                                +           -> Tensor Build t -- ^ __output__: Broadcasted sum of `value` and `bias`.
                                +biasAdd = biasAdd' id
                                +biasAdd' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => OpParams ->
                                +            Tensor v'1 t -- ^ __value__: Any number of dimensions.
                                +            -> Tensor v'2 t -- ^ __bias__: 1-D with size the last dimension of `value`.
                                +            -> Tensor Build t -- ^ __output__: Broadcasted sum of `value` and `bias`.
                                +biasAdd' op'options value bias | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value,
                                +                                                             buildInputs bias]
                                +        return (opDef "BiasAdd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "Any number of dimensions."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "bias"
                                +  description: "1-D with size the last dimension of `value`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Broadcasted sum of `value` and `bias`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | The backward operation for "BiasAdd" on the "bias" tensor.
                                +--
                                +-- It accumulates all the values from out_backprop into the feature dimension.
                                +-- For NHWC data format, the feature dimension is the last. For NCHW data format,
                                +-- the feature dimension is the third-to-last.
                                +biasAddGrad :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => 
                                +               Tensor v'1 t -- ^ __out_backprop__: Any number of dimensions.
                                +               -> Tensor Build t -- ^ __output__: 1-D with size the feature dimension of `out_backprop`.
                                +biasAddGrad = biasAddGrad' id
                                +biasAddGrad' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => OpParams ->
                                +                Tensor v'1 t -- ^ __out_backprop__: Any number of dimensions.
                                +                -> Tensor Build t -- ^ __output__: 1-D with size the feature dimension of `out_backprop`.
                                +biasAddGrad' op'options out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs out_backprop]
                                +        return (opDef "BiasAddGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "Any number of dimensions."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "1-D with size the feature dimension of `out_backprop`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | Adds `bias` to `value`.
                                +--
                                +-- This is a deprecated version of BiasAdd and will be soon removed.
                                +-- 
                                +-- This is a special case of `tf.add` where `bias` is restricted to be 1-D.
                                +-- Broadcasting is supported, so `value` may have any number of dimensions.
                                +biasAddV1 :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => 
                                +             Tensor v'1 t -- ^ __value__: Any number of dimensions.
                                +             -> Tensor v'2 t -- ^ __bias__: 1-D with size the last dimension of `value`.
                                +             -> Tensor Build t -- ^ __output__: Broadcasted sum of `value` and `bias`.
                                +biasAddV1 = biasAddV1' id
                                +biasAddV1' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                          (Data.Complex.Complex Float),
                                +                                          Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __value__: Any number of dimensions.
                                +              -> Tensor v'2 t -- ^ __bias__: 1-D with size the last dimension of `value`.
                                +              -> Tensor Build t -- ^ __output__: Broadcasted sum of `value` and `bias`.
                                +biasAddV1' op'options value bias | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value,
                                +                                                             buildInputs bias]
                                +        return (opDef "BiasAddV1"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "Any number of dimensions."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "bias"
                                +  description: "1-D with size the last dimension of `value`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Broadcasted sum of `value` and `bias`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Counts the number of occurrences of each value in an integer array.
                                +--
                                +-- Outputs a vector with length `size` and the same dtype as `weights`. If
                                +-- `weights` are empty, then index `i` stores the number of times the value `i` is
                                +-- counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
                                +-- the value in `weights` at each index where the corresponding value in `arr` is
                                +-- `i`.
                                +-- 
                                +-- Values in `arr` outside of the range [0, size) are ignored.
                                +bincount :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                            Double, Float] t) => 
                                +            Tensor v'1 Data.Int.Int32 -- ^ __arr__: int32 `Tensor`.
                                +            -> Tensor v'2 Data.Int.Int32 -- ^ __size__: non-negative int32 scalar `Tensor`.
                                +            -> Tensor v'3 t -- ^ __weights__: is an int32, int64, float32, or float64 `Tensor` with the same
                                +                            -- shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
                                +                            -- equal to 1.
                                +            -> Tensor Build t -- ^ __bins__: 1D `Tensor` with length equal to `size`. The counts or summed weights for
                                +            -- each value in the range [0, size).
                                +bincount = bincount' id
                                +bincount' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                             Double, Float] t) => OpParams ->
                                +             Tensor v'1 Data.Int.Int32 -- ^ __arr__: int32 `Tensor`.
                                +             -> Tensor v'2 Data.Int.Int32 -- ^ __size__: non-negative int32 scalar `Tensor`.
                                +             -> Tensor v'3 t -- ^ __weights__: is an int32, int64, float32, or float64 `Tensor` with the same
                                +                             -- shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
                                +                             -- equal to 1.
                                +             -> Tensor Build t -- ^ __bins__: 1D `Tensor` with length equal to `size`. The counts or summed weights for
                                +             -- each value in the range [0, size).
                                +bincount' op'options arr size weights | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs arr,
                                +                                                             buildInputs size,
                                +                                                             buildInputs weights]
                                +        return (opDef "Bincount"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "arr" description: "int32 `Tensor`." type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "non-negative int32 scalar `Tensor`."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "weights"
                                +  description: "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "bins"
                                +  description: "1D `Tensor` with length equal to `size`. The counts or summed weights for\neach value in the range [0, size)."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Bitcasts a tensor from one type to another without copying data.
                                +--
                                +-- Given a tensor `input`, this operation returns a tensor that has the same buffer
                                +-- data as `input` with datatype `type`.
                                +-- 
                                +-- If the input datatype `T` is larger than the output datatype `type` then the
                                +-- shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
                                +-- 
                                +-- If `T` is smaller than `type`, the operator requires that the rightmost
                                +-- dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
                                +-- [..., sizeof(`type`)/sizeof(`T`)] to [...].
                                +-- 
                                +-- *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
                                +-- endian orderings will give different results.
                                +bitcast :: forall v'1 t type' . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t,
                                +                                 OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] type') => 
                                +           Tensor v'1 t -- ^ __input__
                                +           -> Tensor Build type' -- ^ __output__
                                +bitcast = bitcast' id
                                +bitcast' :: forall v'1 t type' . (OneOf '[(Data.Complex.Complex Double),
                                +                                          (Data.Complex.Complex Float),
                                +                                          Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t,
                                +                                  OneOf '[(Data.Complex.Complex Double),
                                +                                          (Data.Complex.Complex Float),
                                +                                          Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] type') => OpParams ->
                                +            Tensor v'1 t -- ^ __input__
                                +            -> Tensor Build type' -- ^ __output__
                                +bitcast' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Bitcast"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "type" .~ tensorType (undefined :: type')
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "type" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "type"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Elementwise computes the bitwise AND of `x` and `y`.
                                +--
                                +-- The result will have those bits set, that are set in both `x` and `y`. The
                                +-- computation is performed on the underlying representations of `x` and `y`.
                                +bitwiseAnd :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16,
                                +                                          Data.Word.Word8] t) => 
                                +              Tensor v'1 t -- ^ __x__
                                +              -> Tensor v'2 t -- ^ __y__
                                +              -> Tensor Build t -- ^ __z__
                                +bitwiseAnd = bitwiseAnd' id
                                +bitwiseAnd' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16,
                                +                                           Data.Word.Word8] t) => OpParams ->
                                +               Tensor v'1 t -- ^ __x__
                                +               -> Tensor v'2 t -- ^ __y__
                                +               -> Tensor Build t -- ^ __z__
                                +bitwiseAnd' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "BitwiseAnd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Elementwise computes the bitwise OR of `x` and `y`.
                                +--
                                +-- The result will have those bits set, that are set in `x`, `y` or both. The
                                +-- computation is performed on the underlying representations of `x` and `y`.
                                +bitwiseOr :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16,
                                +                                         Data.Word.Word8] t) => 
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 t -- ^ __y__
                                +             -> Tensor Build t -- ^ __z__
                                +bitwiseOr = bitwiseOr' id
                                +bitwiseOr' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16,
                                +                                          Data.Word.Word8] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __x__
                                +              -> Tensor v'2 t -- ^ __y__
                                +              -> Tensor Build t -- ^ __z__
                                +bitwiseOr' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "BitwiseOr"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Elementwise computes the bitwise XOR of `x` and `y`.
                                +--
                                +-- The result will have those bits set, that are different in `x` and `y`. The
                                +-- computation is performed on the underlying representations of `x` and `y`.
                                +bitwiseXor :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16,
                                +                                          Data.Word.Word8] t) => 
                                +              Tensor v'1 t -- ^ __x__
                                +              -> Tensor v'2 t -- ^ __y__
                                +              -> Tensor Build t -- ^ __z__
                                +bitwiseXor = bitwiseXor' id
                                +bitwiseXor' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16,
                                +                                           Data.Word.Word8] t) => OpParams ->
                                +               Tensor v'1 t -- ^ __x__
                                +               -> Tensor v'2 t -- ^ __y__
                                +               -> Tensor Build t -- ^ __z__
                                +bitwiseXor' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "BitwiseXor"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Return the shape of s0 op s1 with broadcast.
                                +--
                                +-- Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
                                +-- broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
                                +broadcastArgs :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] t) => 
                                +                 Tensor v'1 t -- ^ __s0__
                                +                 -> Tensor v'2 t -- ^ __s1__
                                +                 -> Tensor Build t -- ^ __r0__
                                +broadcastArgs = broadcastArgs' id
                                +broadcastArgs' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32,
                                +                                              Data.Int.Int64] t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __s0__
                                +                  -> Tensor v'2 t -- ^ __s1__
                                +                  -> Tensor Build t -- ^ __r0__
                                +broadcastArgs' op'options s0 s1 | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs s0,
                                +                                                             buildInputs s1]
                                +        return (opDef "BroadcastArgs"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "s0" type_attr: "T" }
                                +input_arg { name: "s1" type_attr: "T" }
                                +output_arg { name: "r0" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Return the reduction indices for computing gradients of s0 op s1 with broadcast.
                                +--
                                +-- This is typically used by gradient computations for a broadcasting operation.
                                +broadcastGradientArgs :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] t) => 
                                +                         Tensor v'1 t -- ^ __s0__
                                +                         -> Tensor v'2 t -- ^ __s1__
                                +                         -> (Tensor Build t, Tensor Build t)
                                +                         -- ^ (__r0__, __r1__)
                                +                         --
                                +                         -- * __r0__
                                +                         --
                                +                         -- * __r1__
                                +broadcastGradientArgs = broadcastGradientArgs' id
                                +broadcastGradientArgs' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 t -- ^ __s0__
                                +                          -> Tensor v'2 t -- ^ __s1__
                                +                          -> (Tensor Build t, Tensor Build t)
                                +                          -- ^ (__r0__, __r1__)
                                +                          --
                                +                          -- * __r0__
                                +                          --
                                +                          -- * __r1__
                                +broadcastGradientArgs' op'options s0 s1 | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs s0,
                                +                                                             buildInputs s1]
                                +        return (opDef "BroadcastGradientArgs"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "s0" type_attr: "T" }
                                +input_arg { name: "s1" type_attr: "T" }
                                +output_arg { name: "r0" type_attr: "T" }
                                +output_arg { name: "r1" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Bucketizes 'input' based on 'boundaries'.
                                +--
                                +-- For example, if the inputs are
                                +--     boundaries = [0, 10, 100]
                                +--     input = [[-5, 10000]
                                +--              [150,   10]
                                +--              [5,    100]]
                                +-- 
                                +-- then the output will be
                                +--     output = [[0, 3]
                                +--               [3, 2]
                                +--               [1, 3]]
                                +bucketize :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double,
                                +                                     Float] t) => 
                                +             Tensor v'1 t -- ^ __input__: Any shape of Tensor contains with int or float type.
                                +             -> Tensor Build Data.Int.Int32 -- ^ __output__: Same shape with 'input', each value of input replaced with bucket index.
                                +             -- 
                                +             -- @compatibility(numpy)
                                +             -- Equivalent to np.digitize.
                                +             -- @end_compatibility
                                +bucketize = bucketize' id
                                +bucketize' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double,
                                +                                      Float] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __input__: Any shape of Tensor contains with int or float type.
                                +              -> Tensor Build Data.Int.Int32 -- ^ __output__: Same shape with 'input', each value of input replaced with bucket index.
                                +              -- 
                                +              -- @compatibility(numpy)
                                +              -- Equivalent to np.digitize.
                                +              -- @end_compatibility
                                +bucketize' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Bucketize"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Any shape of Tensor contains with int or float type."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Same shape with \'input\', each value of input replaced with bucket index.\n\n@compatibility(numpy)\nEquivalent to np.digitize.\n@end_compatibility"
                                +  type: DT_INT32
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "boundaries"
                                +  type: "list(float)"
                                +  description: "A sorted list of floats gives the boundary of the buckets."
                                +}
                                +-}
                                +
                                +-- | Performs beam search decoding on the logits given in input.
                                +--
                                +-- A note about the attribute merge_repeated: For the beam search decoder,
                                +-- this means that if consecutive entries in a beam are the same, only
                                +-- the first of these is emitted.  That is, when the top path is "A B B B B",
                                +-- "A B" is returned if merge_repeated = True but "A B B B B" is
                                +-- returned if merge_repeated = False.
                                +cTCBeamSearchDecoder :: 
                                +                        Data.Int.Int64 -- ^ __beam_width__: A scalar >= 0 (beam search beam width).
                                +                        -> Data.Int.Int64 -- ^ __top_paths__: A scalar >= 0, <= beam_width (controls output size).
                                +                        -> Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
                                +                        -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch)`.
                                +                        -> ([Tensor Build Data.Int.Int64],
                                +                            [Tensor Build Data.Int.Int64],
                                +                            [Tensor Build Data.Int.Int64], Tensor Build Float)
                                +                        -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)
                                +                        --
                                +                        -- * __decoded_indices__: A list (length: top_paths) of indices matrices.  Matrix j,
                                +                        -- size `(total_decoded_outputs[j] x 2)`, has indices of a
                                +                        -- `SparseTensor<int64, 2>`.  The rows store: [batch, time].
                                +                        --
                                +                        -- * __decoded_values__: A list (length: top_paths) of values vectors.  Vector j,
                                +                        -- size `(length total_decoded_outputs[j])`, has the values of a
                                +                        -- `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
                                +                        --
                                +                        -- * __decoded_shape__: A list (length: top_paths) of shape vector.  Vector j,
                                +                        -- size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
                                +                        -- Its values are: `[batch_size, max_decoded_length[j]]`.
                                +                        --
                                +                        -- * __log_probability__: A matrix, shaped: `(batch_size x top_paths)`.  The
                                +                        -- sequence log-probabilities.
                                +cTCBeamSearchDecoder = cTCBeamSearchDecoder' id
                                +cTCBeamSearchDecoder' :: OpParams ->
                                +                         Data.Int.Int64 -- ^ __beam_width__: A scalar >= 0 (beam search beam width).
                                +                         -> Data.Int.Int64 -- ^ __top_paths__: A scalar >= 0, <= beam_width (controls output size).
                                +                         -> Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
                                +                         -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch)`.
                                +                         -> ([Tensor Build Data.Int.Int64],
                                +                             [Tensor Build Data.Int.Int64],
                                +                             [Tensor Build Data.Int.Int64], Tensor Build Float)
                                +                         -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)
                                +                         --
                                +                         -- * __decoded_indices__: A list (length: top_paths) of indices matrices.  Matrix j,
                                +                         -- size `(total_decoded_outputs[j] x 2)`, has indices of a
                                +                         -- `SparseTensor<int64, 2>`.  The rows store: [batch, time].
                                +                         --
                                +                         -- * __decoded_values__: A list (length: top_paths) of values vectors.  Vector j,
                                +                         -- size `(length total_decoded_outputs[j])`, has the values of a
                                +                         -- `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
                                +                         --
                                +                         -- * __decoded_shape__: A list (length: top_paths) of shape vector.  Vector j,
                                +                         -- size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
                                +                         -- Its values are: `[batch_size, max_decoded_length[j]]`.
                                +                         --
                                +                         -- * __log_probability__: A matrix, shaped: `(batch_size x top_paths)`.  The
                                +                         -- sequence log-probabilities.
                                +cTCBeamSearchDecoder' op'options beam_width top_paths inputs
                                +                      sequence_length | eqLengthGuard [] =
                                +    pureOp [top_paths, top_paths, top_paths] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs,
                                +                                                             buildInputs sequence_length]
                                +        return (opDef "CTCBeamSearchDecoder"
                                +                & opAttr "beam_width" .~ beam_width
                                +                & opAttr "top_paths" .~ top_paths
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "sequence_length"
                                +  description: "A vector containing sequence lengths, size `(batch)`."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "decoded_indices"
                                +  description: "A list (length: top_paths) of indices matrices.  Matrix j,\nsize `(total_decoded_outputs[j] x 2)`, has indices of a\n`SparseTensor<int64, 2>`.  The rows store: [batch, time]."
                                +  type: DT_INT64
                                +  number_attr: "top_paths"
                                +}
                                +output_arg {
                                +  name: "decoded_values"
                                +  description: "A list (length: top_paths) of values vectors.  Vector j,\nsize `(length total_decoded_outputs[j])`, has the values of a\n`SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j."
                                +  type: DT_INT64
                                +  number_attr: "top_paths"
                                +}
                                +output_arg {
                                +  name: "decoded_shape"
                                +  description: "A list (length: top_paths) of shape vector.  Vector j,\nsize `(2)`, stores the shape of the decoded `SparseTensor[j]`.\nIts values are: `[batch_size, max_decoded_length[j]]`."
                                +  type: DT_INT64
                                +  number_attr: "top_paths"
                                +}
                                +output_arg {
                                +  name: "log_probability"
                                +  description: "A matrix, shaped: `(batch_size x top_paths)`.  The\nsequence log-probabilities."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "beam_width"
                                +  type: "int"
                                +  description: "A scalar >= 0 (beam search beam width)."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "top_paths"
                                +  type: "int"
                                +  description: "A scalar >= 0, <= beam_width (controls output size)."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "merge_repeated"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If true, merge repeated classes in output."
                                +}
                                +-}
                                +
                                +-- | Performs greedy decoding on the logits given in inputs.
                                +--
                                +-- A note about the attribute merge_repeated: if enabled, when
                                +-- consecutive logits' maximum indices are the same, only the first of
                                +-- these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
                                +-- becomes "A B B" if merge_repeated = True and "A B B B B" if
                                +-- merge_repeated = False.
                                +-- 
                                +-- Regardless of the value of merge_repeated, if the maximum index of a given
                                +-- time and batch corresponds to the blank, index `(num_classes - 1)`, no new
                                +-- element is emitted.
                                +cTCGreedyDecoder :: 
                                +                    Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch_size)`.
                                +                    -> (Tensor Build Data.Int.Int64,
                                +                        Tensor Build Data.Int.Int64,
                                +                        Tensor Build Data.Int.Int64, Tensor Build Float)
                                +                    -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)
                                +                    --
                                +                    -- * __decoded_indices__: Indices matrix, size `(total_decoded_outputs x 2)`,
                                +                    -- of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
                                +                    --
                                +                    -- * __decoded_values__: Values vector, size: `(total_decoded_outputs)`,
                                +                    -- of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
                                +                    --
                                +                    -- * __decoded_shape__: Shape vector, size `(2)`, of the decoded SparseTensor.
                                +                    -- Values are: `[batch_size, max_decoded_length]`.
                                +                    --
                                +                    -- * __log_probability__: Matrix, size `(batch_size x 1)`, containing sequence
                                +                    -- log-probabilities.
                                +cTCGreedyDecoder = cTCGreedyDecoder' id
                                +cTCGreedyDecoder' :: OpParams ->
                                +                     Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch_size)`.
                                +                     -> (Tensor Build Data.Int.Int64,
                                +                         Tensor Build Data.Int.Int64,
                                +                         Tensor Build Data.Int.Int64, Tensor Build Float)
                                +                     -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)
                                +                     --
                                +                     -- * __decoded_indices__: Indices matrix, size `(total_decoded_outputs x 2)`,
                                +                     -- of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
                                +                     --
                                +                     -- * __decoded_values__: Values vector, size: `(total_decoded_outputs)`,
                                +                     -- of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
                                +                     --
                                +                     -- * __decoded_shape__: Shape vector, size `(2)`, of the decoded SparseTensor.
                                +                     -- Values are: `[batch_size, max_decoded_length]`.
                                +                     --
                                +                     -- * __log_probability__: Matrix, size `(batch_size x 1)`, containing sequence
                                +                     -- log-probabilities.
                                +cTCGreedyDecoder' op'options inputs sequence_length | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs,
                                +                                                             buildInputs sequence_length]
                                +        return (opDef "CTCGreedyDecoder"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "sequence_length"
                                +  description: "A vector containing sequence lengths, size `(batch_size)`."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "decoded_indices"
                                +  description: "Indices matrix, size `(total_decoded_outputs x 2)`,\nof a `SparseTensor<int64, 2>`.  The rows store: [batch, time]."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "decoded_values"
                                +  description: "Values vector, size: `(total_decoded_outputs)`,\nof a `SparseTensor<int64, 2>`.  The vector stores the decoded classes."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "decoded_shape"
                                +  description: "Shape vector, size `(2)`, of the decoded SparseTensor.\nValues are: `[batch_size, max_decoded_length]`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "log_probability"
                                +  description: "Matrix, size `(batch_size x 1)`, containing sequence\nlog-probabilities."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "merge_repeated"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, merge repeated classes in output."
                                +}
                                +-}
                                +
                                +-- | Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
                                +--
                                +-- the gradient.  This class performs the softmax operation for you, so inputs
                                +-- should be e.g. linear projections of outputs by an LSTM.
                                +cTCLoss :: 
                                +           Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
                                +           -> Tensor v'2 Data.Int.Int64 -- ^ __labels_indices__: The indices of a `SparseTensor<int32, 2>`.
                                +                                        -- `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
                                +                                        -- `(batch b, time t)`.
                                +           -> Tensor v'3 Data.Int.Int32 -- ^ __labels_values__: The values (labels) associated with the given batch and time.
                                +           -> Tensor v'4 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths (batch).
                                +           -> (Tensor Build Float, Tensor Build Float)
                                +           -- ^ (__loss__, __gradient__)
                                +           --
                                +           -- * __loss__: A vector (batch) containing log-probabilities.
                                +           --
                                +           -- * __gradient__: The gradient of `loss`.  3-D, shape:
                                +           -- `(max_time x batch_size x num_classes)`.
                                +cTCLoss = cTCLoss' id
                                +cTCLoss' :: OpParams ->
                                +            Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
                                +            -> Tensor v'2 Data.Int.Int64 -- ^ __labels_indices__: The indices of a `SparseTensor<int32, 2>`.
                                +                                         -- `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
                                +                                         -- `(batch b, time t)`.
                                +            -> Tensor v'3 Data.Int.Int32 -- ^ __labels_values__: The values (labels) associated with the given batch and time.
                                +            -> Tensor v'4 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths (batch).
                                +            -> (Tensor Build Float, Tensor Build Float)
                                +            -- ^ (__loss__, __gradient__)
                                +            --
                                +            -- * __loss__: A vector (batch) containing log-probabilities.
                                +            --
                                +            -- * __gradient__: The gradient of `loss`.  3-D, shape:
                                +            -- `(max_time x batch_size x num_classes)`.
                                +cTCLoss' op'options inputs labels_indices labels_values
                                +         sequence_length | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs,
                                +                                                             buildInputs labels_indices,
                                +                                                             buildInputs labels_values,
                                +                                                             buildInputs sequence_length]
                                +        return (opDef "CTCLoss"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "labels_indices"
                                +  description: "The indices of a `SparseTensor<int32, 2>`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "labels_values"
                                +  description: "The values (labels) associated with the given batch and time."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "sequence_length"
                                +  description: "A vector containing sequence lengths (batch)."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "loss"
                                +  description: "A vector (batch) containing log-probabilities."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "gradient"
                                +  description: "The gradient of `loss`.  3-D, shape:\n`(max_time x batch_size x num_classes)`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "preprocess_collapse_repeated"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation."
                                +}
                                +attr {
                                +  name: "ctc_merge_repeated"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "Scalar.  If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels.  This is a simplified version of CTC."
                                +}
                                +attr {
                                +  name: "ignore_longer_outputs_than_inputs"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Scalar. If set to true, during CTC\ncalculation, items that have longer output sequences than input sequences\nare skipped: they don\'t contribute to the loss term and have zero-gradient."
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that caches elements from `input_dataset`.
                                +--
                                +-- A CacheDataset will iterate over the input_dataset, and store tensors. If the
                                +-- cache already exists, the cache will be used. If the cache is inappropriate
                                +-- (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
                                +-- will the returned when used.
                                +cacheDataset :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                [DataType] -- ^ __output_types__
                                +                -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__: A path on the filesystem where we should cache the dataset. Note: this
                                +                                                         -- will be a directory.
                                +                -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +cacheDataset = cacheDataset' id
                                +cacheDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                 [DataType] -- ^ __output_types__
                                +                 -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                 -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__: A path on the filesystem where we should cache the dataset. Note: this
                                +                                                          -- will be a directory.
                                +                 -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +cacheDataset' op'options output_types input_dataset
                                +              filename | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs filename]
                                +        buildOp [] (opDef "CacheDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +input_arg {
                                +  name: "filename"
                                +  description: "A path on the filesystem where we should cache the dataset. Note: this\nwill be a directory."
                                +  type: DT_STRING
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Cast x of type SrcT to y of DstT.
                                +
                                +cast :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) => 
                                +        Tensor v'1 srcT -- ^ __x__
                                +        -> Tensor Build dstT -- ^ __y__
                                +cast = cast' id
                                +cast' :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) =>
                                +         OpParams ->
                                +         Tensor v'1 srcT -- ^ __x__
                                +         -> Tensor Build dstT -- ^ __y__
                                +cast' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Cast"
                                +                & opAttr "SrcT" .~ tensorType (undefined :: srcT)
                                +                & opAttr "DstT" .~ tensorType (undefined :: dstT)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "SrcT" }
                                +output_arg { name: "y" type_attr: "DstT" }
                                +attr { name: "SrcT" type: "type" }
                                +attr { name: "DstT" type: "type" }
                                +-}
                                +
                                +-- | Returns element-wise smallest integer in not less than x.
                                +
                                +ceil :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +ceil = ceil' id
                                +ceil' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +         OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +ceil' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Ceil"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Checks a tensor for NaN and Inf values.
                                +--
                                +-- When run, reports an `InvalidArgument` error if `tensor` has any values
                                +-- that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
                                +checkNumerics :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +                 Tensor v'1 t -- ^ __tensor__
                                +                 -> Tensor Build t -- ^ __output__
                                +checkNumerics = checkNumerics' id
                                +checkNumerics' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +                  OpParams ->
                                +                  Tensor v'1 t -- ^ __tensor__
                                +                  -> Tensor Build t -- ^ __output__
                                +checkNumerics' op'options tensor | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor]
                                +        return (opDef "CheckNumerics"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "tensor" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "message"
                                +  type: "string"
                                +  description: "Prefix of the error message."
                                +}
                                +-}
                                +
                                +-- | Computes the Cholesky decomposition of one or more square matrices.
                                +--
                                +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
                                +-- form square matrices.
                                +-- 
                                +-- The input has to be symmetric and positive definite. Only the lower-triangular
                                +-- part of the input will be used for this operation. The upper-triangular part
                                +-- will not be read.
                                +-- 
                                +-- The output is a tensor of the same shape as the input
                                +-- containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
                                +-- 
                                +-- **Note**: The gradient computation on GPU is faster for large matrices but
                                +-- not for large batch dimensions when the submatrices are small. In this
                                +-- case it might be faster to use the CPU.
                                +cholesky :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float), Double,
                                +                                    Float] t) => 
                                +            Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`.
                                +            -> Tensor Build t -- ^ __output__: Shape is `[..., M, M]`.
                                +cholesky = cholesky' id
                                +cholesky' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float), Double,
                                +                                     Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`.
                                +             -> Tensor Build t -- ^ __output__: Shape is `[..., M, M]`.
                                +cholesky' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Cholesky"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "Shape is `[..., M, M]`." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Shape is `[..., M, M]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_DOUBLE
                                +      type: DT_FLOAT
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
                                +--
                                +-- For an explanation see "Differentiation of the Cholesky algorithm" by
                                +-- Iain Murray http://arxiv.org/abs/1602.07527.
                                +choleskyGrad :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +                Tensor v'1 t -- ^ __l__: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
                                +                             -- Algorithm depends only on lower triangular part of the innermost matrices of
                                +                             -- this tensor.
                                +                -> Tensor v'2 t -- ^ __grad__: df/dl where f is some scalar function. Shape is `[..., M, M]`.
                                +                                -- Algorithm depends only on lower triangular part of the innermost matrices of
                                +                                -- this tensor.
                                +                -> Tensor Build t -- ^ __output__: Symmetrized version of df/dA . Shape is `[..., M, M]`
                                +choleskyGrad = choleskyGrad' id
                                +choleskyGrad' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams ->
                                +                 Tensor v'1 t -- ^ __l__: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
                                +                              -- Algorithm depends only on lower triangular part of the innermost matrices of
                                +                              -- this tensor.
                                +                 -> Tensor v'2 t -- ^ __grad__: df/dl where f is some scalar function. Shape is `[..., M, M]`.
                                +                                 -- Algorithm depends only on lower triangular part of the innermost matrices of
                                +                                 -- this tensor.
                                +                 -> Tensor Build t -- ^ __output__: Symmetrized version of df/dA . Shape is `[..., M, M]`
                                +choleskyGrad' op'options l grad | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs l,
                                +                                                             buildInputs grad]
                                +        return (opDef "CholeskyGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "l"
                                +  description: "Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "df/dl where f is some scalar function. Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Symmetrized version of df/dA . Shape is `[..., M, M]`"
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Converts two real numbers to a complex number.
                                +--
                                +-- Given a tensor `real` representing the real part of a complex number, and a
                                +-- tensor `imag` representing the imaginary part of a complex number, this
                                +-- operation returns complex numbers elementwise of the form \\(a + bj\\), where
                                +-- *a* represents the `real` part and *b* represents the `imag` part.
                                +-- 
                                +-- The input tensors `real` and `imag` must have the same shape.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 'real' is [2.25, 3.25]
                                +-- # tensor `imag` is [4.75, 5.75]
                                +-- tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
                                +-- ```
                                +complex :: forall v'1 v'2 t tout . (OneOf '[Double, Float] t,
                                +                                    OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float)] tout) =>
                                +           
                                +           Tensor v'1 t -- ^ __real__
                                +           -> Tensor v'2 t -- ^ __imag__
                                +           -> Tensor Build tout -- ^ __out__
                                +complex = complex' id
                                +complex' :: forall v'1 v'2 t tout . (OneOf '[Double, Float] t,
                                +                                     OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float)] tout) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __real__
                                +            -> Tensor v'2 t -- ^ __imag__
                                +            -> Tensor Build tout -- ^ __out__
                                +complex' op'options real imag | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs real,
                                +                                                             buildInputs imag]
                                +        return (opDef "Complex"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "real" type_attr: "T" }
                                +input_arg { name: "imag" type_attr: "T" }
                                +output_arg { name: "out" type_attr: "Tout" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "Tout"
                                +  type: "type"
                                +  default_value { type: DT_COMPLEX64 }
                                +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
                                +}
                                +-}
                                +
                                +-- | Computes the complex absolute value of a tensor.
                                +--
                                +-- Given a tensor `x` of complex numbers, this operation returns a tensor of type
                                +-- `float` or `double` that is the absolute value of each element in `x`. All
                                +-- elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
                                +-- value is computed as \\( \sqrt{a^2 + b^2}\\).
                                +complexAbs :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double),
                                +                                           (Data.Complex.Complex Float)] t,
                                +                                   OneOf '[Double, Float] tout) => 
                                +              Tensor v'1 t -- ^ __x__
                                +              -> Tensor Build tout -- ^ __y__
                                +complexAbs = complexAbs' id
                                +complexAbs' :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float)] t,
                                +                                    OneOf '[Double, Float] tout) => OpParams ->
                                +               Tensor v'1 t -- ^ __x__
                                +               -> Tensor Build tout -- ^ __y__
                                +complexAbs' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "ComplexAbs"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "Tout" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_COMPLEX64 }
                                +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
                                +}
                                +attr {
                                +  name: "Tout"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Computes the ids of the positions in sampled_candidates that match true_labels.
                                +--
                                +-- When doing log-odds NCE, the result of this op should be passed through a
                                +-- SparseToDense op, then added to the logits of the sampled candidates. This has
                                +-- the effect of 'removing' the sampled labels that match the true labels by
                                +-- making the classifier sure that they are sampled labels.
                                +computeAccidentalHits :: 
                                +                         Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                         -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: The true_classes output of UnpackSparseLabels.
                                +                         -> Tensor v'2 Data.Int.Int64 -- ^ __sampled_candidates__: The sampled_candidates output of CandidateSampler.
                                +                         -> (Tensor Build Data.Int.Int32,
                                +                             Tensor Build Data.Int.Int64, Tensor Build Float)
                                +                         -- ^ (__indices__, __ids__, __weights__)
                                +                         --
                                +                         -- * __indices__: A vector of indices corresponding to rows of true_candidates.
                                +                         --
                                +                         -- * __ids__: A vector of IDs of positions in sampled_candidates that match a true_label
                                +                         -- for the row with the corresponding index in indices.
                                +                         --
                                +                         -- * __weights__: A vector of the same length as indices and ids, in which each element
                                +                         -- is -FLOAT_MAX.
                                +computeAccidentalHits = computeAccidentalHits' id
                                +computeAccidentalHits' :: OpParams ->
                                +                          Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                          -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: The true_classes output of UnpackSparseLabels.
                                +                          -> Tensor v'2 Data.Int.Int64 -- ^ __sampled_candidates__: The sampled_candidates output of CandidateSampler.
                                +                          -> (Tensor Build Data.Int.Int32,
                                +                              Tensor Build Data.Int.Int64, Tensor Build Float)
                                +                          -- ^ (__indices__, __ids__, __weights__)
                                +                          --
                                +                          -- * __indices__: A vector of indices corresponding to rows of true_candidates.
                                +                          --
                                +                          -- * __ids__: A vector of IDs of positions in sampled_candidates that match a true_label
                                +                          -- for the row with the corresponding index in indices.
                                +                          --
                                +                          -- * __weights__: A vector of the same length as indices and ids, in which each element
                                +                          -- is -FLOAT_MAX.
                                +computeAccidentalHits' op'options num_true true_classes
                                +                       sampled_candidates | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes,
                                +                                                             buildInputs sampled_candidates]
                                +        return (opDef "ComputeAccidentalHits"
                                +                & opAttr "num_true" .~ num_true
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "true_classes"
                                +  description: "The true_classes output of UnpackSparseLabels."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sampled_candidates"
                                +  description: "The sampled_candidates output of CandidateSampler."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "indices"
                                +  description: "A vector of indices corresponding to rows of true_candidates."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "ids"
                                +  description: "A vector of IDs of positions in sampled_candidates that match a true_label\nfor the row with the corresponding index in indices."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "weights"
                                +  description: "A vector of the same length as indices and ids, in which each element\nis -FLOAT_MAX."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_true"
                                +  type: "int"
                                +  description: "Number of true labels per context."
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +-}
                                +
                                +-- | Concatenates tensors along one dimension.
                                +
                                +concat :: forall v'1 v'2 t . (TensorType t) => 
                                +          Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the
                                +                                    -- range [0, rank(values)).
                                +          -> [Tensor v'2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,
                                +                            -- and their sizes must match in all dimensions except `concat_dim`.
                                +          -> Tensor Build t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the
                                +          -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
                                +          -- in `concat_dim` where it has the sum of the sizes.
                                +concat = concat' id
                                +concat' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +           Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the
                                +                                     -- range [0, rank(values)).
                                +           -> [Tensor v'2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,
                                +                             -- and their sizes must match in all dimensions except `concat_dim`.
                                +           -> Tensor Build t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the
                                +           -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
                                +           -- in `concat_dim` where it has the sum of the sizes.
                                +concat' op'options concat_dim
                                +        values | eqLengthGuard [("N", [("values", length values)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim,
                                +                                                             buildInputs values]
                                +        return (opDef "Concat"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length values) :: Int64
                                +{-
                                +input_arg {
                                +  name: "concat_dim"
                                +  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
                                +  type_attr: "T"
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 2 }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes offsets of concat inputs within its output.
                                +--
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'x' is [2, 2, 7]
                                +-- # 'y' is [2, 3, 7]
                                +-- # 'z' is [2, 5, 7]
                                +-- concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
                                +-- ```
                                +-- 
                                +-- This is typically used by gradient computations for a concat operation.
                                +concatOffset :: 
                                +                Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: The dimension along which to concatenate.
                                +                -> [Tensor v'2 Data.Int.Int32] -- ^ __shape__: The `N` int32 vectors representing shape of tensors being concatenated.
                                +                -> [Tensor Build Data.Int.Int32] -- ^ __offset__: The `N` int32 vectors representing the starting offset
                                +                -- of input tensors within the concatenated output.
                                +concatOffset = concatOffset' id
                                +concatOffset' :: OpParams ->
                                +                 Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: The dimension along which to concatenate.
                                +                 -> [Tensor v'2 Data.Int.Int32] -- ^ __shape__: The `N` int32 vectors representing shape of tensors being concatenated.
                                +                 -> [Tensor Build Data.Int.Int32] -- ^ __offset__: The `N` int32 vectors representing the starting offset
                                +                 -- of input tensors within the concatenated output.
                                +concatOffset' op'options concat_dim
                                +              shape | eqLengthGuard [("N", [("shape", length shape)])] =
                                +    pureOp [n] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim,
                                +                                                             buildInputs shape]
                                +        return (opDef "ConcatOffset"
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length shape) :: Int64
                                +{-
                                +input_arg {
                                +  name: "concat_dim"
                                +  description: "The dimension along which to concatenate."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "shape"
                                +  description: "The `N` int32 vectors representing shape of tensors being concatenated."
                                +  type: DT_INT32
                                +  number_attr: "N"
                                +}
                                +output_arg {
                                +  name: "offset"
                                +  description: "The `N` int32 vectors representing the starting offset\nof input tensors within the concatenated output."
                                +  type: DT_INT32
                                +  number_attr: "N"
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 2 }
                                +-}
                                +
                                +-- | Concatenates tensors along one dimension.
                                +
                                +concatV2 :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                           Data.Int.Int64] tidx) =>
                                +            
                                +            [Tensor v'1 t] -- ^ __values__: List of `N` Tensors to concatenate. Their ranks and types must match,
                                +                           -- and their sizes must match in all dimensions except `concat_dim`.
                                +            -> Tensor v'2 tidx -- ^ __axis__: 0-D.  The dimension along which to concatenate.  Must be in the
                                +                               -- range [-rank(values), rank(values)).
                                +            -> Tensor Build t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the
                                +            -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
                                +            -- in `concat_dim` where it has the sum of the sizes.
                                +concatV2 = concatV2' id
                                +concatV2' :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] tidx) =>
                                +             OpParams ->
                                +             [Tensor v'1 t] -- ^ __values__: List of `N` Tensors to concatenate. Their ranks and types must match,
                                +                            -- and their sizes must match in all dimensions except `concat_dim`.
                                +             -> Tensor v'2 tidx -- ^ __axis__: 0-D.  The dimension along which to concatenate.  Must be in the
                                +                                -- range [-rank(values), rank(values)).
                                +             -> Tensor Build t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the
                                +             -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
                                +             -- in `concat_dim` where it has the sum of the sizes.
                                +concatV2' op'options values
                                +          axis | eqLengthGuard [("N", [("values", length values)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values,
                                +                                                             buildInputs axis]
                                +        return (opDef "ConcatV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length values) :: Int64
                                +{-
                                +input_arg {
                                +  name: "values"
                                +  description: "List of `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +}
                                +input_arg {
                                +  name: "axis"
                                +  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [-rank(values), rank(values))."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
                                +  type_attr: "T"
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 2 }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that concatenates `input_dataset` with `another_dataset`.
                                +
                                +concatenateDataset :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                      [DataType] -- ^ __output_types__
                                +                      -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                      -> Tensor v'2 ResourceHandle -- ^ __another_dataset__
                                +                      -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +concatenateDataset = concatenateDataset' id
                                +concatenateDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                       [DataType] -- ^ __output_types__
                                +                       -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                       -> Tensor v'2 ResourceHandle -- ^ __another_dataset__
                                +                       -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +concatenateDataset' op'options output_types input_dataset
                                +                    another_dataset | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs another_dataset]
                                +        buildOp [] (opDef "ConcatenateDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +input_arg { name: "another_dataset" type: DT_RESOURCE }
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | A conditional accumulator for aggregating gradients.
                                +--
                                +-- The accumulator accepts gradients marked with local_step greater or
                                +-- equal to the most recent global_step known to the accumulator. The
                                +-- average can be extracted from the accumulator, provided sufficient
                                +-- gradients have been accumulated. Extracting the average automatically
                                +-- resets the aggregate to 0, and increments the global_step recorded by
                                +-- the accumulator.
                                +conditionalAccumulator :: forall m' . (MonadBuild m') => 
                                +                          DataType -- ^ __dtype__: The type of the value being accumulated.
                                +                          -> Shape -- ^ __shape__: The shape of the values, can be [], in which case shape is unknown.
                                +                          -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the accumulator.
                                +conditionalAccumulator = conditionalAccumulator' id
                                +conditionalAccumulator' :: forall m' . (MonadBuild m') => OpParams ->
                                +                           DataType -- ^ __dtype__: The type of the value being accumulated.
                                +                           -> Shape -- ^ __shape__: The shape of the values, can be [], in which case shape is unknown.
                                +                           -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the accumulator.
                                +conditionalAccumulator' op'options dtype shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "ConditionalAccumulator"
                                +                    & opAttr "dtype" .~ dtype
                                +                    & opAttr "shape" .~ shape
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the accumulator."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the value being accumulated."
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "The shape of the values, can be [], in which case shape is unknown."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this accumulator will be shared under the\ngiven name across multiple sessions."
                                +}
                                +-}
                                +
                                +-- | Returns the complex conjugate of a complex number.
                                +--
                                +-- Given a tensor `input` of complex numbers, this operation returns a tensor of
                                +-- complex numbers that are the complex conjugate of each element in `input`. The
                                +-- complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
                                +-- real part and *b* is the imaginary part.
                                +-- 
                                +-- The complex conjugate returned by this operation is of the form \\(a - bj\\).
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
                                +-- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
                                +-- ```
                                +conj :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float)] t) => 
                                +        Tensor v'1 t -- ^ __input__
                                +        -> Tensor Build t -- ^ __output__
                                +conj = conj' id
                                +conj' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float)] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __input__
                                +         -> Tensor Build t -- ^ __output__
                                +conj' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Conj"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_COMPLEX64 }
                                +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
                                +}
                                +-}
                                +
                                +-- | Returns a constant tensor.
                                +
                                +const :: forall dtype . (TensorType dtype) => 
                                +         Tensor Build dtype -- ^ __output__
                                +const = const' id
                                +const' :: forall dtype . (TensorType dtype) => OpParams ->
                                +          Tensor Build dtype -- ^ __output__
                                +const' op'options | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        return (opDef "Const"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "output" type_attr: "dtype" }
                                +attr {
                                +  name: "value"
                                +  type: "tensor"
                                +  description: "Attr `value` is the tensor to return."
                                +}
                                +attr { name: "dtype" type: "type" }
                                +-}
                                +
                                +-- | Does nothing. Serves as a control trigger for scheduling.
                                +--
                                +-- Only useful as a placeholder for control edges.
                                +controlTrigger :: forall m' . (MonadBuild m') => 
                                +                  m' (ControlNode)
                                +controlTrigger = controlTrigger' id
                                +controlTrigger' :: forall m' . (MonadBuild m') => OpParams ->
                                +                   m' (ControlNode)
                                +controlTrigger' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "ControlTrigger"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +
                                +-}
                                +
                                +-- | Computes a 2-D convolution given 4-D `input` and `filter` tensors.
                                +--
                                +-- Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
                                +-- and a filter / kernel tensor of shape
                                +-- `[filter_height, filter_width, in_channels, out_channels]`, this op
                                +-- performs the following:
                                +-- 
                                +-- 1. Flattens the filter to a 2-D matrix with shape
                                +--    `[filter_height * filter_width * in_channels, output_channels]`.
                                +-- 2. Extracts image patches from the input tensor to form a *virtual*
                                +--    tensor of shape `[batch, out_height, out_width,
                                +--    filter_height * filter_width * in_channels]`.
                                +-- 3. For each patch, right-multiplies the filter matrix and the image patch
                                +--    vector.
                                +-- 
                                +-- In detail, with the default NHWC format,
                                +-- 
                                +--     output[b, i, j, k] =
                                +--         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                                +--                         filter[di, dj, q, k]
                                +-- 
                                +-- Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
                                +-- horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
                                +conv2D :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) => 
                                +          Tensor v'1 t -- ^ __input__: A 4-D tensor. The dimension order is interpreted according to the value
                                +                       -- of `data_format`, see below for details.
                                +          -> Tensor v'2 t -- ^ __filter__: A 4-D tensor of shape
                                +                          -- `[filter_height, filter_width, in_channels, out_channels]`
                                +          -> Tensor Build t -- ^ __output__: A 4-D tensor. The dimension order is determined by the value of
                                +          -- `data_format`, see below for details.
                                +conv2D = conv2D' id
                                +conv2D' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) =>
                                +           OpParams ->
                                +           Tensor v'1 t -- ^ __input__: A 4-D tensor. The dimension order is interpreted according to the value
                                +                        -- of `data_format`, see below for details.
                                +           -> Tensor v'2 t -- ^ __filter__: A 4-D tensor of shape
                                +                           -- `[filter_height, filter_width, in_channels, out_channels]`
                                +           -> Tensor Build t -- ^ __output__: A 4-D tensor. The dimension order is determined by the value of
                                +           -- `data_format`, see below for details.
                                +conv2D' op'options input filter | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter]
                                +        return (opDef "Conv2D"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "A 4-D tensor. The dimension order is interpreted according to the value\nof `data_format`, see below for details."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "A 4-D tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`"
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A 4-D tensor. The dimension order is determined by the value of\n`data_format`, see below for details."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_HALF type: DT_FLOAT } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 4.  The stride of the sliding window for each\ndimension of `input`. The dimension order is determined by the value of\n  `data_format`, see below for details."
                                +}
                                +attr {
                                +  name: "use_cudnn_on_gpu" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, channels, height, width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradients of convolution with respect to the filter.
                                +
                                +conv2DBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16,
                                +                                                        Float] t) => 
                                +                        Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
                                +                        -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
                                +                                                     -- where `filter` is a 4-D
                                +                                                     -- `[filter_height, filter_width, in_channels, out_channels]` tensor.
                                +                        -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
                                +                                        -- Gradients w.r.t. the output of the convolution.
                                +                        -> Tensor Build t -- ^ __output__: 4-D with shape
                                +                        -- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
                                +                        -- the `filter` input of the convolution.
                                +conv2DBackpropFilter = conv2DBackpropFilter' id
                                +conv2DBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16,
                                +                                                         Float] t) =>
                                +                         OpParams ->
                                +                         Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
                                +                         -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
                                +                                                      -- where `filter` is a 4-D
                                +                                                      -- `[filter_height, filter_width, in_channels, out_channels]` tensor.
                                +                         -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
                                +                                         -- Gradients w.r.t. the output of the convolution.
                                +                         -> Tensor Build t -- ^ __output__: 4-D with shape
                                +                         -- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
                                +                         -- the `filter` input of the convolution.
                                +conv2DBackpropFilter' op'options input filter_sizes
                                +                      out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter_sizes,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "Conv2DBackpropFilter"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter_sizes"
                                +  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, out_channels]` tensor."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_HALF type: DT_FLOAT } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
                                +}
                                +attr {
                                +  name: "use_cudnn_on_gpu" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradients of convolution with respect to the input.
                                +
                                +conv2DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16,
                                +                                                       Float] t) => 
                                +                       Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`,
                                +                                                 -- where `input` is a 4-D `[batch, height, width, channels]` tensor.
                                +                       -> Tensor v'2 t -- ^ __filter__: 4-D with shape
                                +                                       -- `[filter_height, filter_width, in_channels, out_channels]`.
                                +                       -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
                                +                                       -- Gradients w.r.t. the output of the convolution.
                                +                       -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
                                +                       -- w.r.t. the input of the convolution.
                                +conv2DBackpropInput = conv2DBackpropInput' id
                                +conv2DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16,
                                +                                                        Float] t) => OpParams ->
                                +                        Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`,
                                +                                                  -- where `input` is a 4-D `[batch, height, width, channels]` tensor.
                                +                        -> Tensor v'2 t -- ^ __filter__: 4-D with shape
                                +                                        -- `[filter_height, filter_width, in_channels, out_channels]`.
                                +                        -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
                                +                                        -- Gradients w.r.t. the output of the convolution.
                                +                        -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
                                +                        -- w.r.t. the input of the convolution.
                                +conv2DBackpropInput' op'options input_sizes filter
                                +                     out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes,
                                +                                                             buildInputs filter,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "Conv2DBackpropInput"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_sizes"
                                +  description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient\nw.r.t. the input of the convolution."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_HALF type: DT_FLOAT } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
                                +}
                                +attr {
                                +  name: "use_cudnn_on_gpu" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | Computes a 3-D convolution given 5-D `input` and `filter` tensors.
                                +--
                                +-- In signal processing, cross-correlation is a measure of similarity of
                                +-- two waveforms as a function of a time-lag applied to one of them. This
                                +-- is also known as a sliding dot product or sliding inner-product.
                                +-- 
                                +-- Our Conv3D implements a form of cross-correlation.
                                +conv3D :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +          Tensor v'1 t -- ^ __input__: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
                                +          -> Tensor v'2 t -- ^ __filter__: Shape `[filter_depth, filter_height, filter_width, in_channels,
                                +                          -- out_channels]`. `in_channels` must match between `input` and `filter`.
                                +          -> Tensor Build t -- ^ __output__
                                +conv3D = conv3D' id
                                +conv3D' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams ->
                                +           Tensor v'1 t -- ^ __input__: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
                                +           -> Tensor v'2 t -- ^ __filter__: Shape `[filter_depth, filter_height, filter_width, in_channels,
                                +                           -- out_channels]`. `in_channels` must match between `input` and `filter`.
                                +           -> Tensor Build t -- ^ __output__
                                +conv3D' op'options input filter | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter]
                                +        return (opDef "Conv3D"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Shape `[batch, in_depth, in_height, in_width, in_channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "Shape `[filter_depth, filter_height, filter_width, in_channels,\nout_channels]`. `in_channels` must match between `input` and `filter`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NDHWC" }
                                +  description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
                                +  allowed_values { list { s: "NDHWC" s: "NCDHW" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradients of 3-D convolution with respect to the filter.
                                +
                                +conv3DBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +                        Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
                                +                        -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
                                +                                        -- `in_channels` must match between `input` and `filter`.
                                +                        -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
                                +                                        -- out_channels]`.
                                +                        -> Tensor Build t -- ^ __output__
                                +conv3DBackpropFilter = conv3DBackpropFilter' id
                                +conv3DBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) =>
                                +                         OpParams ->
                                +                         Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
                                +                         -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
                                +                                         -- `in_channels` must match between `input` and `filter`.
                                +                         -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
                                +                                         -- out_channels]`.
                                +                         -> Tensor Build t -- ^ __output__
                                +conv3DBackpropFilter' op'options input filter out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "Conv3DBackpropFilter"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Shape `[batch, depth, rows, cols, in_channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradients of 3-D convolution with respect to the filter.
                                +
                                +conv3DBackpropFilterV2 :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +                          Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
                                +                          -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
                                +                                                       -- where `filter` is a 5-D
                                +                                                       -- `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
                                +                                                       -- tensor.
                                +                          -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
                                +                                          -- out_channels]`.
                                +                          -> Tensor Build t -- ^ __output__
                                +conv3DBackpropFilterV2 = conv3DBackpropFilterV2' id
                                +conv3DBackpropFilterV2' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) =>
                                +                           OpParams ->
                                +                           Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
                                +                           -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
                                +                                                        -- where `filter` is a 5-D
                                +                                                        -- `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
                                +                                                        -- tensor.
                                +                           -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
                                +                                           -- out_channels]`.
                                +                           -> Tensor Build t -- ^ __output__
                                +conv3DBackpropFilterV2' op'options input filter_sizes
                                +                        out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter_sizes,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "Conv3DBackpropFilterV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Shape `[batch, depth, rows, cols, in_channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter_sizes"
                                +  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 5-D\n`[filter_depth, filter_height, filter_width, in_channels, out_channels]`\ntensor."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NDHWC" }
                                +  description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
                                +  allowed_values { list { s: "NDHWC" s: "NCDHW" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradients of 3-D convolution with respect to the input.
                                +
                                +conv3DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +                       Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
                                +                       -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
                                +                                       -- `in_channels` must match between `input` and `filter`.
                                +                       -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
                                +                                       -- out_channels]`.
                                +                       -> Tensor Build t -- ^ __output__
                                +conv3DBackpropInput = conv3DBackpropInput' id
                                +conv3DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) =>
                                +                        OpParams ->
                                +                        Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
                                +                        -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
                                +                                        -- `in_channels` must match between `input` and `filter`.
                                +                        -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
                                +                                        -- out_channels]`.
                                +                        -> Tensor Build t -- ^ __output__
                                +conv3DBackpropInput' op'options input filter out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "Conv3DBackpropInput"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Shape `[batch, depth, rows, cols, in_channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradients of 3-D convolution with respect to the input.
                                +
                                +conv3DBackpropInputV2 :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +                         Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the tensor shape of `input`,
                                +                                                   -- where `input` is a 5-D
                                +                                                   -- `[batch, depth, rows, cols, in_channels]` tensor.
                                +                         -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
                                +                                         -- `in_channels` must match between `input` and `filter`.
                                +                         -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
                                +                                         -- out_channels]`.
                                +                         -> Tensor Build t -- ^ __output__
                                +conv3DBackpropInputV2 = conv3DBackpropInputV2' id
                                +conv3DBackpropInputV2' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the tensor shape of `input`,
                                +                                                    -- where `input` is a 5-D
                                +                                                    -- `[batch, depth, rows, cols, in_channels]` tensor.
                                +                          -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
                                +                                          -- `in_channels` must match between `input` and `filter`.
                                +                          -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
                                +                                          -- out_channels]`.
                                +                          -> Tensor Build t -- ^ __output__
                                +conv3DBackpropInputV2' op'options input_sizes filter
                                +                       out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes,
                                +                                                             buildInputs filter,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "Conv3DBackpropInputV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_sizes"
                                +  description: "An integer vector representing the tensor shape of `input`,\nwhere `input` is a 5-D\n`[batch, depth, rows, cols, in_channels]` tensor."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NDHWC" }
                                +  description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
                                +  allowed_values { list { s: "NDHWC" s: "NCDHW" } }
                                +}
                                +-}
                                +
                                +-- | Computes cos of x element-wise.
                                +
                                +cos :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Word.Word16,
                                +                               Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +cos = cos' id
                                +cos' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                Double, Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +cos' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Cos"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes hyperbolic cosine of x element-wise.
                                +
                                +cosh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +cosh = cosh' id
                                +cosh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +cosh' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Cosh"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Increments 'ref' until it reaches 'limit'.
                                +
                                +countUpTo :: forall t m' . (MonadBuild m', OneOf '[Data.Int.Int32,
                                +                                                   Data.Int.Int64] t) => 
                                +             Data.Int.Int64 -- ^ __limit__: If incrementing ref would bring it above limit, instead generates an
                                +                            -- 'OutOfRange' error.
                                +             -> Tensor Ref t -- ^ __ref__: Should be from a scalar `Variable` node.
                                +             -> m' (Tensor Value t) -- ^ __output__: A copy of the input before increment. If nothing else modifies the
                                +             -- input, the values produced will all be distinct.
                                +countUpTo = countUpTo' id
                                +countUpTo' :: forall t m' . (MonadBuild m', OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] t) =>
                                +              OpParams ->
                                +              Data.Int.Int64 -- ^ __limit__: If incrementing ref would bring it above limit, instead generates an
                                +                             -- 'OutOfRange' error.
                                +              -> Tensor Ref t -- ^ __ref__: Should be from a scalar `Variable` node.
                                +              -> m' (Tensor Value t) -- ^ __output__: A copy of the input before increment. If nothing else modifies the
                                +              -- input, the values produced will all be distinct.
                                +countUpTo' op'options limit ref | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref]
                                +        buildOp [] (opDef "CountUpTo"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "limit" .~ limit
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a scalar `Variable` node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "limit"
                                +  type: "int"
                                +  description: "If incrementing ref would bring it above limit, instead generates an\n\'OutOfRange\' error."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Extracts crops from the input image tensor and bilinearly resizes them (possibly
                                +--
                                +-- with aspect ratio change) to a common output size specified by `crop_size`. This
                                +-- is more general than the `crop_to_bounding_box` op which extracts a fixed size
                                +-- slice from the input image and does not allow resizing or aspect ratio change.
                                +-- 
                                +-- Returns a tensor with `crops` from the input `image` at positions defined at the
                                +-- bounding box locations in `boxes`. The cropped boxes are all resized (with
                                +-- bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
                                +-- result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.
                                +cropAndResize :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t) => 
                                +                 Tensor v'1 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
                                +                              -- Both `image_height` and `image_width` need to be positive.
                                +                 -> Tensor v'2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
                                +                                     -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
                                +                                     -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
                                +                                     -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
                                +                                     -- `[0, 1]` interval of normalized image height is mapped to
                                +                                     -- `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
                                +                                     -- which case the sampled crop is an up-down flipped version of the original
                                +                                     -- image. The width dimension is treated similarly. Normalized coordinates
                                +                                     -- outside the `[0, 1]` range are allowed, in which case we use
                                +                                     -- `extrapolation_value` to extrapolate the input image values.
                                +                 -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
                                +                                              -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
                                +                 -> Tensor v'4 Data.Int.Int32 -- ^ __crop_size__: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
                                +                                              -- cropped image patches are resized to this size. The aspect ratio of the image
                                +                                              -- content is not preserved. Both `crop_height` and `crop_width` need to be
                                +                                              -- positive.
                                +                 -> Tensor Build Float -- ^ __crops__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
                                +cropAndResize = cropAndResize' id
                                +cropAndResize' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
                                +                               -- Both `image_height` and `image_width` need to be positive.
                                +                  -> Tensor v'2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
                                +                                      -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
                                +                                      -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
                                +                                      -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
                                +                                      -- `[0, 1]` interval of normalized image height is mapped to
                                +                                      -- `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
                                +                                      -- which case the sampled crop is an up-down flipped version of the original
                                +                                      -- image. The width dimension is treated similarly. Normalized coordinates
                                +                                      -- outside the `[0, 1]` range are allowed, in which case we use
                                +                                      -- `extrapolation_value` to extrapolate the input image values.
                                +                  -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
                                +                                               -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
                                +                  -> Tensor v'4 Data.Int.Int32 -- ^ __crop_size__: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
                                +                                               -- cropped image patches are resized to this size. The aspect ratio of the image
                                +                                               -- content is not preserved. Both `crop_height` and `crop_width` need to be
                                +                                               -- positive.
                                +                  -> Tensor Build Float -- ^ __crops__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
                                +cropAndResize' op'options image boxes box_ind crop_size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image,
                                +                                                             buildInputs boxes,
                                +                                                             buildInputs box_ind,
                                +                                                             buildInputs crop_size]
                                +        return (opDef "CropAndResize"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "image"
                                +  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "boxes"
                                +  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "box_ind"
                                +  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "crop_size"
                                +  description: "A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All\ncropped image patches are resized to this size. The aspect ratio of the image\ncontent is not preserved. Both `crop_height` and `crop_width` need to be\npositive."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "crops"
                                +  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "method"
                                +  type: "string"
                                +  default_value { s: "bilinear" }
                                +  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
                                +  allowed_values { list { s: "bilinear" } }
                                +}
                                +attr {
                                +  name: "extrapolation_value"
                                +  type: "float"
                                +  default_value { f: 0.0 }
                                +  description: "Value used for extrapolation, when applicable."
                                +}
                                +-}
                                +
                                +-- | Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
                                +
                                +cropAndResizeGradBoxes :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                              Data.Int.Int32,
                                +                                                              Data.Int.Int64,
                                +                                                              Data.Int.Int8,
                                +                                                              Data.Word.Word16,
                                +                                                              Data.Word.Word8,
                                +                                                              Double,
                                +                                                              Float] t) => 
                                +                          Tensor v'1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
                                +                          -> Tensor v'2 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
                                +                                          -- Both `image_height` and `image_width` need to be positive.
                                +                          -> Tensor v'3 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
                                +                                              -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
                                +                                              -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
                                +                                              -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
                                +                                              -- `[0, 1]` interval of normalized image height is mapped to
                                +                                              -- `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in
                                +                                              -- which case the sampled crop is an up-down flipped version of the original
                                +                                              -- image. The width dimension is treated similarly. Normalized coordinates
                                +                                              -- outside the `[0, 1]` range are allowed, in which case we use
                                +                                              -- `extrapolation_value` to extrapolate the input image values.
                                +                          -> Tensor v'4 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
                                +                                                       -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
                                +                          -> Tensor Build Float -- ^ __output__: A 2-D tensor of shape `[num_boxes, 4]`.
                                +cropAndResizeGradBoxes = cropAndResizeGradBoxes' id
+cropAndResizeGradBoxes' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
+                                                               Data.Int.Int32,
+                                                               Data.Int.Int64,
+                                                               Data.Int.Int8,
+                                                               Data.Word.Word16,
+                                                               Data.Word.Word8,
+                                                               Double,
+                                                               Float] t) =>
+                           OpParams ->
+                           Tensor v'1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+                           -> Tensor v'2 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+                                           -- Both `image_height` and `image_width` need to be positive.
+                           -> Tensor v'3 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+                                               -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
+                                               -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+                                               -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
+                                               -- `[0, 1]` interval of normalized image height is mapped to
+                                               -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
+                                               -- which case the sampled crop is an up-down flipped version of the original
+                                               -- image. The width dimension is treated similarly. Normalized coordinates
+                                               -- outside the `[0, 1]` range are allowed, in which case we use
+                                               -- `extrapolation_value` to extrapolate the input image values.
+                           -> Tensor v'4 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+                                                        -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+                           -> Tensor Build Float -- ^ __output__: A 2-D tensor of shape `[num_boxes, 4]`.
+cropAndResizeGradBoxes' op'options grads image boxes
+                        box_ind | eqLengthGuard [] =
+    pureOp [] $ do
+        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads,
+                                                             buildInputs image,
+                                                             buildInputs boxes,
+                                                             buildInputs box_ind]
+        return (opDef "CropAndResizeGradBoxes"
+                & opAttr "T" .~ tensorType (undefined :: t)
+                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "grads"
                                +  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "image"
                                +  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "boxes"
                                +  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "box_ind"
                                +  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A 2-D tensor of shape `[num_boxes, 4]`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "method"
                                +  type: "string"
                                +  default_value { s: "bilinear" }
                                +  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
                                +  allowed_values { list { s: "bilinear" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient of the crop_and_resize op wrt the input image tensor.
                                +
+cropAndResizeGradImage :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16,
+                                                              Double,
+                                                              Float] t) => 
+                          Tensor v'1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+                          -> Tensor v'2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+                                              -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
+                                              -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+                                              -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
+                                              -- `[0, 1]` interval of normalized image height is mapped to
+                                              -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
+                                              -- which case the sampled crop is an up-down flipped version of the original
+                                              -- image. The width dimension is treated similarly. Normalized coordinates
+                                              -- outside the `[0, 1]` range are allowed, in which case we use
+                                              -- `extrapolation_value` to extrapolate the input image values.
+                          -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+                                                       -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+                          -> Tensor v'4 Data.Int.Int32 -- ^ __image_size__: A 1-D tensor with value `[batch, image_height, image_width, depth]`
+                                                       -- containing the original image size. Both `image_height` and `image_width` need
+                                                       -- to be positive.
+                          -> Tensor Build t -- ^ __output__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+cropAndResizeGradImage = cropAndResizeGradImage' id
+cropAndResizeGradImage' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16,
+                                                               Double,
+                                                               Float] t) =>
+                           OpParams ->
+                           Tensor v'1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+                           -> Tensor v'2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+                                               -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
+                                               -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+                                               -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
+                                               -- `[0, 1]` interval of normalized image height is mapped to
+                                               -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
+                                               -- which case the sampled crop is an up-down flipped version of the original
+                                               -- image. The width dimension is treated similarly. Normalized coordinates
+                                               -- outside the `[0, 1]` range are allowed, in which case we use
+                                               -- `extrapolation_value` to extrapolate the input image values.
+                           -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+                                                        -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+                           -> Tensor v'4 Data.Int.Int32 -- ^ __image_size__: A 1-D tensor with value `[batch, image_height, image_width, depth]`
+                                                        -- containing the original image size. Both `image_height` and `image_width` need
+                                                        -- to be positive.
+                           -> Tensor Build t -- ^ __output__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+cropAndResizeGradImage' op'options grads boxes box_ind
+                        image_size | eqLengthGuard [] =
+    pureOp [] $ do
+        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads,
+                                                             buildInputs boxes,
+                                                             buildInputs box_ind,
+                                                             buildInputs image_size]
+        return (opDef "CropAndResizeGradImage"
+                & opAttr "T" .~ tensorType (undefined :: t)
+                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "grads"
                                +  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "boxes"
                                +  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "box_ind"
                                +  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "image_size"
                                +  description: "A 1-D tensor with value `[batch, image_height, image_width, depth]`\ncontaining the original image size. Both `image_height` and `image_width` need\nto be positive."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "method"
                                +  type: "string"
                                +  default_value { s: "bilinear" }
                                +  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
                                +  allowed_values { list { s: "bilinear" } }
                                +}
                                +-}
                                +
+-- | Compute the pairwise cross product.
+--
+-- `a` and `b` must be the same shape; they can either be simple 3-element vectors,
+-- or any shape where the innermost dimension is 3. In the latter case, each pair
+-- of corresponding 3-element vectors is cross-multiplied independently.
+cross :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
+                                     Data.Int.Int64, Data.Int.Int8,
+                                     Data.Word.Word16, Data.Word.Word8, Double,
+                                     Float] t) => 
+         Tensor v'1 t -- ^ __a__: A tensor containing 3-element vectors (the innermost dimension must be 3).
+         -> Tensor v'2 t -- ^ __b__: Another tensor, of same type and shape as `a`.
+         -> Tensor Build t -- ^ __product__: Pairwise cross product of the vectors in `a` and `b`.
+cross = cross' id
+cross' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
+                                      Data.Int.Int64, Data.Int.Int8,
+                                      Data.Word.Word16, Data.Word.Word8, Double,
+                                      Float] t) => OpParams -> -- extra attributes to set on the generated "Cross" op
+          Tensor v'1 t -- ^ __a__: A tensor containing 3-element vectors (the innermost dimension must be 3).
+          -> Tensor v'2 t -- ^ __b__: Another tensor, of same type and shape as `a`.
+          -> Tensor Build t -- ^ __product__: Pairwise cross product of the vectors in `a` and `b`.
+cross' op'options a b | eqLengthGuard [] =
+    pureOp [] $ do
+        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a,
+                                                             buildInputs b]
+        return (opDef "Cross"
+                & opAttr "T" .~ tensorType (undefined :: t)
+                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "a"
                                +  description: "A tensor containing 3-element vectors."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "b"
                                +  description: "Another tensor, of same type and shape as `a`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "product"
                                +  description: "Pairwise cross product of the vectors in `a` and `b`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
+-- | Compute the cumulative product of the tensor `x` along `axis`.
+--
+-- By default, this op performs an inclusive cumprod, which means that the first
+-- element of the input is identical to the first element of the output:
+-- 
+-- ```python
+-- tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
+-- ```
+-- 
+-- By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
+-- performed instead:
+-- 
+-- ```python
+-- tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
+-- ```
+-- 
+-- By setting the `reverse` kwarg to `True`, the cumprod is performed in the
+-- opposite direction:
+-- 
+-- ```python
+-- tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
+-- ```
+-- 
+-- This is more efficient than using separate `tf.reverse` ops.
+-- 
+-- The `reverse` and `exclusive` kwargs can also be combined:
+-- 
+-- ```python
+-- tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
+-- ```
+cumprod :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
+                                            (Data.Complex.Complex Float),
+                                            Data.Int.Int16, Data.Int.Int32,
+                                            Data.Int.Int64, Data.Int.Int8,
+                                            Data.Word.Word16, Data.Word.Word8,
+                                            Double, Float] t,
+                                    OneOf '[Data.Int.Int32,
+                                            Data.Int.Int64] tidx) => 
+           Tensor v'1 t -- ^ __x__: the input tensor.
+           -> Tensor v'2 tidx -- ^ __axis__: the axis along which to compute the cumulative product.
+           -> Tensor Build t -- ^ __out__: the cumulative product of `x` along `axis`.
+cumprod = cumprod' id
+cumprod' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
+                                             (Data.Complex.Complex Float),
+                                             Data.Int.Int16, Data.Int.Int32,
+                                             Data.Int.Int64, Data.Int.Int8,
+                                             Data.Word.Word16, Data.Word.Word8,
+                                             Double, Float] t,
+                                     OneOf '[Data.Int.Int32,
+                                             Data.Int.Int64] tidx) =>
+            OpParams -> -- extra attributes for the op, e.g. `exclusive` / `reverse` (both default to false)
+            Tensor v'1 t -- ^ __x__: the input tensor.
+            -> Tensor v'2 tidx -- ^ __axis__: the axis along which to compute the cumulative product.
+            -> Tensor Build t -- ^ __out__: the cumulative product of `x` along `axis`.
+cumprod' op'options x axis | eqLengthGuard [] =
+    pureOp [] $ do
+        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
+                                                             buildInputs axis]
+        return (opDef "Cumprod"
+                & opAttr "T" .~ tensorType (undefined :: t)
+                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
+                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "axis" type_attr: "Tidx" }
                                +output_arg { name: "out" type_attr: "T" }
                                +attr { name: "exclusive" type: "bool" default_value { b: false } }
                                +attr { name: "reverse" type: "bool" default_value { b: false } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
+-- | Compute the cumulative sum of the tensor `x` along `axis`.
+--
+-- By default, this op performs an inclusive cumsum, which means that the first
+-- element of the input is identical to the first element of the output:
+-- 
+-- ```python
+-- tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
+-- ```
+-- 
+-- By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
+-- performed instead:
+-- 
+-- ```python
+-- tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
+-- ```
+-- 
+-- By setting the `reverse` kwarg to `True`, the cumsum is performed in the
+-- opposite direction:
+-- 
+-- ```python
+-- tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
+-- ```
+-- 
+-- This is more efficient than using separate `tf.reverse` ops.
+-- 
+-- The `reverse` and `exclusive` kwargs can also be combined:
+-- 
+-- ```python
+-- tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
+-- ```
+cumsum :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
+                                           (Data.Complex.Complex Float),
+                                           Data.Int.Int16, Data.Int.Int32,
+                                           Data.Int.Int64, Data.Int.Int8,
+                                           Data.Word.Word16, Data.Word.Word8,
+                                           Double, Float] t,
+                                   OneOf '[Data.Int.Int32,
+                                           Data.Int.Int64] tidx) => 
+          Tensor v'1 t -- ^ __x__: the input tensor.
+          -> Tensor v'2 tidx -- ^ __axis__: the axis along which to compute the cumulative sum.
+          -> Tensor Build t -- ^ __out__: the cumulative sum of `x` along `axis`.
+cumsum = cumsum' id
                                +cumsum' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t,
                                +                                    OneOf '[Data.Int.Int32,
                                +                                            Data.Int.Int64] tidx) => OpParams ->
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor v'2 tidx -- ^ __axis__
                                +           -> Tensor Build t -- ^ __out__
                                +cumsum' op'options x axis | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs axis]
                                +        return (opDef "Cumsum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "axis" type_attr: "Tidx" }
                                +output_arg { name: "out" type_attr: "T" }
                                +attr { name: "exclusive" type: "bool" default_value { b: false } }
                                +attr { name: "reverse" type: "bool" default_value { b: false } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Identity op for gradient debugging.
                                +--
                                +-- This op is hidden from public in Python. It is used by TensorFlow Debugger to
                                +-- register gradient tensors for gradient debugging.
                                +debugGradientIdentity :: forall v'1 t . (TensorType t) => 
                                +                         Tensor v'1 t -- ^ __input__
                                +                         -> Tensor Build t -- ^ __output__
                                +debugGradientIdentity = debugGradientIdentity' id
                                +debugGradientIdentity' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                          Tensor v'1 t -- ^ __input__
                                +                          -> Tensor Build t -- ^ __output__
                                +debugGradientIdentity' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "DebugGradientIdentity"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Decode web-safe base64-encoded strings.
                                +--
                                +-- Input may or may not have padding at the end. See EncodeBase64 for padding.
                                +-- Web-safe means that input must use - and _ instead of + and /.
                                +decodeBase64 :: 
                                +                Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Base64 strings to decode.
                                +                -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Decoded strings.
                                +decodeBase64 = decodeBase64' id
                                +decodeBase64' :: OpParams ->
                                +                 Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Base64 strings to decode.
                                +                 -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Decoded strings.
                                +decodeBase64' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "DecodeBase64"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Base64 strings to decode."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "output" description: "Decoded strings." type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Decode the first frame of a BMP-encoded image to a uint8 tensor.
                                +--
                                +-- The attr `channels` indicates the desired number of color channels for the
                                +-- decoded image.
                                +-- 
                                +-- Accepted values are:
                                +-- 
                                +-- *   0: Use the number of channels in the BMP-encoded image.
                                +-- *   3: output an RGB image.
                                +-- *   4: output an RGBA image.
                                +decodeBmp :: 
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The BMP-encoded image.
                                +             -> Tensor Build Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`. RGB order
                                +decodeBmp = decodeBmp' id
                                +decodeBmp' :: OpParams ->
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The BMP-encoded image.
                                +              -> Tensor Build Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`. RGB order
                                +decodeBmp' op'options contents | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents]
                                +        return (opDef "DecodeBmp"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "contents"
                                +  description: "0-D.  The BMP-encoded image."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "image"
                                +  description: "3-D with shape `[height, width, channels]`. RGB order"
                                +  type: DT_UINT8
                                +}
                                +attr { name: "channels" type: "int" default_value { i: 0 } }
                                +-}
                                +
                                +-- | Convert CSV records to tensors. Each column maps to one tensor.
                                +--
                                +-- RFC 4180 format is expected for the CSV records.
                                +-- (https://tools.ietf.org/html/rfc4180)
                                +-- Note that we allow leading and trailing spaces with int or float field.
                                +decodeCSV :: forall v'1 v'2 oUT_TYPE . (OneOfs '[Data.ByteString.ByteString,
                                +                                                 Data.Int.Int32, Data.Int.Int64,
                                +                                                 Float] oUT_TYPE) => 
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __records__: Each string is a record/row in the csv and all records should have
                                +                                                   -- the same format.
                                +             -> TensorList (v'2) oUT_TYPE -- ^ __record_defaults__: One tensor per column of the input record, with either a
                                +                                          -- scalar default value for that column or empty if the column is required.
                                +             -> TensorList (Build) oUT_TYPE -- ^ __output__: Each tensor will have the same shape as records.
                                +decodeCSV = decodeCSV' id
                                +decodeCSV' :: forall v'1 v'2 oUT_TYPE . (OneOfs '[Data.ByteString.ByteString,
                                +                                                  Data.Int.Int32,
                                +                                                  Data.Int.Int64,
                                +                                                  Float] oUT_TYPE) =>
                                +              OpParams ->
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __records__: Each string is a record/row in the csv and all records should have
                                +                                                    -- the same format.
                                +              -> TensorList (v'2) oUT_TYPE -- ^ __record_defaults__: One tensor per column of the input record, with either a
                                +                                           -- scalar default value for that column or empty if the column is required.
                                +              -> TensorList (Build) oUT_TYPE -- ^ __output__: Each tensor will have the same shape as records.
                                +decodeCSV' op'options records record_defaults | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs records,
                                +                                                             buildInputs record_defaults]
                                +        return (opDef "DecodeCSV"
                                +                & opAttr "OUT_TYPE" .~ fromTensorTypes (Proxy :: Proxy oUT_TYPE)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "records"
                                +  description: "Each string is a record/row in the csv and all records should have\nthe same format."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "record_defaults"
                                +  description: "One tensor per column of the input record, with either a\nscalar default value for that column or empty if the column is required."
                                +  type_list_attr: "OUT_TYPE"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Each tensor will have the same shape as records."
                                +  type_list_attr: "OUT_TYPE"
                                +}
                                +attr {
                                +  name: "OUT_TYPE"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT type: DT_INT32 type: DT_INT64 type: DT_STRING
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "field_delim"
                                +  type: "string"
                                +  default_value { s: "," }
                                +  description: "char delimiter to separate fields in a record."
                                +}
                                +attr {
                                +  name: "use_quote_delim"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If false, treats double quotation marks as regular\ncharacters inside of the string fields (ignoring RFC 4180, Section 2,\nBullet 5)."
                                +}
                                +-}
                                +
                                +-- | Decode the first frame of a GIF-encoded image to a uint8 tensor.
                                +--
                                +-- GIF with frame or transparency compression are not supported
                                +-- convert animated GIF from compressed to uncompressed by:
                                +-- 
                                +--     convert $src.gif -coalesce $dst.gif
                                +-- 
                                +-- This op also supports decoding JPEGs and PNGs, though it is cleaner to use
                                +-- `tf.image.decode_image`.
                                +decodeGif :: 
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The GIF-encoded image.
                                +             -> Tensor Build Data.Word.Word8 -- ^ __image__: 4-D with shape `[num_frames, height, width, 3]`. RGB order
                                +decodeGif = decodeGif' id
                                +decodeGif' :: OpParams ->
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The GIF-encoded image.
                                +              -> Tensor Build Data.Word.Word8 -- ^ __image__: 4-D with shape `[num_frames, height, width, 3]`. RGB order
                                +decodeGif' op'options contents | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents]
                                +        return (opDef "DecodeGif"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "contents"
                                +  description: "0-D.  The GIF-encoded image."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "image"
                                +  description: "4-D with shape `[num_frames, height, width, 3]`. RGB order"
                                +  type: DT_UINT8
                                +}
                                +-}
                                +
                                +-- | Convert JSON-encoded Example records to binary protocol buffer strings.
                                +--
                                +-- This op translates a tensor containing Example records, encoded using
                                +-- the [standard JSON
                                +-- mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
                                +-- into a tensor containing the same records encoded as binary protocol
                                +-- buffers. The resulting tensor can then be fed to any of the other
                                +-- Example-parsing ops.
                                +decodeJSONExample :: 
                                +                     Tensor v'1 Data.ByteString.ByteString -- ^ __json_examples__: Each string is a JSON object serialized according to the JSON
                                +                                                           -- mapping of the Example proto.
                                +                     -> Tensor Build Data.ByteString.ByteString -- ^ __binary_examples__: Each string is a binary Example protocol buffer corresponding
                                +                     -- to the respective element of `json_examples`.
                                +decodeJSONExample = decodeJSONExample' id
                                +decodeJSONExample' :: OpParams ->
                                +                      Tensor v'1 Data.ByteString.ByteString -- ^ __json_examples__: Each string is a JSON object serialized according to the JSON
                                +                                                            -- mapping of the Example proto.
                                +                      -> Tensor Build Data.ByteString.ByteString -- ^ __binary_examples__: Each string is a binary Example protocol buffer corresponding
                                +                      -- to the respective element of `json_examples`.
                                +decodeJSONExample' op'options json_examples | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs json_examples]
                                +        return (opDef "DecodeJSONExample"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "json_examples"
                                +  description: "Each string is a JSON object serialized according to the JSON\nmapping of the Example proto."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "binary_examples"
                                +  description: "Each string is a binary Example protocol buffer corresponding\nto the respective element of `json_examples`."
                                +  type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Decode a JPEG-encoded image to a uint8 tensor.
                                +--
                                +-- The attr `channels` indicates the desired number of color channels for the
                                +-- decoded image.
                                +-- 
                                +-- Accepted values are:
                                +-- 
                                +-- *   0: Use the number of channels in the JPEG-encoded image.
                                +-- *   1: output a grayscale image.
                                +-- *   3: output an RGB image.
                                +-- 
                                +-- If needed, the JPEG-encoded image is transformed to match the requested number
                                +-- of color channels.
                                +-- 
                                +-- The attr `ratio` allows downscaling the image by an integer factor during
                                +-- decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
                                +-- downscaling the image later.
                                +-- 
                                +-- This op also supports decoding PNGs and non-animated GIFs since the interface is
                                +-- the same, though it is cleaner to use `tf.image.decode_image`.
                                +decodeJpeg :: 
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The JPEG-encoded image.
                                +              -> Tensor Build Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`..
                                +decodeJpeg = decodeJpeg' id
                                +decodeJpeg' :: OpParams ->
                                +               Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The JPEG-encoded image.
                                +               -> Tensor Build Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`..
                                +decodeJpeg' op'options contents | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents]
                                +        return (opDef "DecodeJpeg"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "contents"
                                +  description: "0-D.  The JPEG-encoded image."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "image"
                                +  description: "3-D with shape `[height, width, channels]`.."
                                +  type: DT_UINT8
                                +}
                                +attr {
                                +  name: "channels"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of color channels for the decoded image."
                                +}
                                +attr {
                                +  name: "ratio"
                                +  type: "int"
                                +  default_value { i: 1 }
                                +  description: "Downscaling ratio."
                                +}
                                +attr {
                                +  name: "fancy_upscaling"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only)."
                                +}
                                +attr {
                                +  name: "try_recover_truncated"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true try to recover an image from truncated input."
                                +}
                                +attr {
                                +  name: "acceptable_fraction"
                                +  type: "float"
                                +  default_value { f: 1.0 }
                                +  description: "The minimum required fraction of lines before a truncated\ninput is accepted."
                                +}
                                +attr {
                                +  name: "dct_method"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "string specifying a hint about the algorithm used for\ndecompression.  Defaults to \"\" which maps to a system-specific\ndefault.  Currently valid values are [\"INTEGER_FAST\",\n\"INTEGER_ACCURATE\"].  The hint may be ignored (e.g., the internal\njpeg library changes to a version that does not have that specific\noption.)"
                                +}
                                +-}
                                +
                                +-- | Decode a PNG-encoded image to a uint8 or uint16 tensor.
                                +--
                                +-- The attr `channels` indicates the desired number of color channels for the
                                +-- decoded image.
                                +-- 
                                +-- Accepted values are:
                                +-- 
                                +-- *   0: Use the number of channels in the PNG-encoded image.
                                +-- *   1: output a grayscale image.
                                +-- *   3: output an RGB image.
                                +-- *   4: output an RGBA image.
                                +-- 
                                +-- If needed, the PNG-encoded image is transformed to match the requested number
                                +-- of color channels.
                                +-- 
                                +-- This op also supports decoding JPEGs and non-animated GIFs since the interface
                                +-- is the same, though it is cleaner to use `tf.image.decode_image`.
                                +decodePng :: forall v'1 dtype . (OneOf '[Data.Word.Word16,
                                +                                         Data.Word.Word8] dtype) => 
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The PNG-encoded image.
                                +             -> Tensor Build dtype -- ^ __image__: 3-D with shape `[height, width, channels]`.
                                +decodePng = decodePng' id
                                +decodePng' :: forall v'1 dtype . (OneOf '[Data.Word.Word16,
                                +                                          Data.Word.Word8] dtype) => OpParams ->
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The PNG-encoded image.
                                +              -> Tensor Build dtype -- ^ __image__: 3-D with shape `[height, width, channels]`.
                                +decodePng' op'options contents | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents]
                                +        return (opDef "DecodePng"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "contents"
                                +  description: "0-D.  The PNG-encoded image."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "image"
                                +  description: "3-D with shape `[height, width, channels]`."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "channels"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of color channels for the decoded image."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  default_value { type: DT_UINT8 }
                                +  allowed_values { list { type: DT_UINT8 type: DT_UINT16 } }
                                +}
                                +-}
                                +
                                +-- | Reinterpret the bytes of a string as a vector of numbers.
                                +
                                +decodeRaw :: forall v'1 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] out_type) => 
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__: All the elements must have the same length.
                                +             -> Tensor Build out_type -- ^ __output__: A Tensor with one more dimension than the input `bytes`.  The
                                +             -- added dimension will have size equal to the length of the elements
                                +             -- of `bytes` divided by the number of bytes to represent `out_type`.
                                +decodeRaw = decodeRaw' id
                                +decodeRaw' :: forall v'1 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] out_type) =>
                                +              OpParams ->
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__: All the elements must have the same length.
                                +              -> Tensor Build out_type -- ^ __output__: A Tensor with one more dimension than the input `bytes`.  The
                                +              -- added dimension will have size equal to the length of the elements
                                +              -- of `bytes` divided by the number of bytes to represent `out_type`.
                                +decodeRaw' op'options bytes | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs bytes]
                                +        return (opDef "DecodeRaw"
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "bytes"
                                +  description: "All the elements must have the same length."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A Tensor with one more dimension than the input `bytes`.  The\nadded dimension will have size equal to the length of the elements\nof `bytes` divided by the number of bytes to represent `out_type`."
                                +  type_attr: "out_type"
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_INT64
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "little_endian"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "Whether the input `bytes` are in little-endian order.\nIgnored for `out_type` values that are stored in a single byte like\n`uint8`."
                                +}
                                +-}
                                +
                                +-- | Decode a 16-bit PCM WAV file to a float tensor.
                                +--
                                +-- The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
                                +-- 
                                +-- When desired_channels is set, if the input contains fewer channels than this
                                +-- then the last channel will be duplicated to give the requested number, else if
                                +-- the input has more channels than requested then the additional channels will be
                                +-- ignored.
                                +-- 
                                +-- If desired_samples is set, then the audio will be cropped or padded with zeroes
                                +-- to the requested length.
                                +-- 
                                +-- The first output contains a Tensor with the content of the audio samples. The
                                +-- lowest dimension will be the number of channels, and the second will be the
                                +-- number of samples. For example, a ten-sample-long stereo WAV file should give an
                                +-- output shape of [10, 2].
                                +decodeWav :: 
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: The WAV-encoded audio, usually from a file.
                                +             -> (Tensor Build Float, Tensor Build Data.Int.Int32)
                                +             -- ^ (__audio__, __sample_rate__)
                                +             --
                                +             -- * __audio__: 2-D with shape `[length, channels]`.
                                +             --
                                +             -- * __sample_rate__: Scalar holding the sample rate found in the WAV header.
                                +decodeWav = decodeWav' id
                                +decodeWav' :: OpParams ->
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: The WAV-encoded audio, usually from a file.
                                +              -> (Tensor Build Float, Tensor Build Data.Int.Int32)
                                +              -- ^ (__audio__, __sample_rate__)
                                +              --
                                +              -- * __audio__: 2-D with shape `[length, channels]`.
                                +              --
                                +              -- * __sample_rate__: Scalar holding the sample rate found in the WAV header.
                                +decodeWav' op'options contents | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents]
                                +        return (opDef "DecodeWav"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "contents"
                                +  description: "The WAV-encoded audio, usually from a file."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "audio"
                                +  description: "2-D with shape `[length, channels]`."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "sample_rate"
                                +  description: "Scalar holding the sample rate found in the WAV header."
                                +  type: DT_INT32
                                +}
                                +attr {
                                +  name: "desired_channels"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Number of sample channels wanted."
                                +}
                                +attr {
                                +  name: "desired_samples"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Length of audio requested."
                                +}
                                +-}
                                +
                                +-- | Delete the tensor specified by its handle in the session.
                                +
                                +deleteSessionTensor :: forall v'1 m' . (MonadBuild m') => 
                                +                       Tensor v'1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state.
                                +                       -> m' (ControlNode)
                                +deleteSessionTensor = deleteSessionTensor' id
                                +deleteSessionTensor' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                        Tensor v'1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state.
                                +                        -> m' (ControlNode)
                                +deleteSessionTensor' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "DeleteSessionTensor"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle for a tensor stored in the session state."
                                +  type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Applies set operation along last dimension of 2 `Tensor` inputs.
                                +--
                                +-- See SetOperationOp::SetOperationFromContext for values of `set_operation`.
                                +-- 
                                +-- Output `result` is a `SparseTensor` represented by `result_indices`,
                                +-- `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
                                +-- has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
                                +-- dimension contains the result of `set_operation` applied to the corresponding
                                +-- `[0...n-1]` dimension of `set`.
                                +denseToDenseSetOperation :: forall v'1 v'2
                                +                            t . (OneOf '[Data.ByteString.ByteString,
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16,
                                +                                         Data.Word.Word8] t) => 
                                +                            Tensor v'1 t -- ^ __set1__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
                                +                                         -- Dimension `n` contains values in a set, duplicates are allowed but ignored.
                                +                            -> Tensor v'2 t -- ^ __set2__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
                                +                                            -- Dimension `n` contains values in a set, duplicates are allowed but ignored.
                                +                            -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                                Tensor Build Data.Int.Int64)
                                +                            -- ^ (__result_indices__, __result_values__, __result_shape__)
                                +                            --
                                +                            -- * __result_indices__: 2D indices of a `SparseTensor`.
                                +                            --
                                +                            -- * __result_values__: 1D values of a `SparseTensor`.
                                +                            --
                                +                            -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
                                +                            -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
                                +                            -- is the max result set size across all `0...n-1` dimensions.
                                +denseToDenseSetOperation = denseToDenseSetOperation' id
                                +denseToDenseSetOperation' :: forall v'1 v'2
                                +                             t . (OneOf '[Data.ByteString.ByteString,
                                +                                          Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16,
                                +                                          Data.Word.Word8] t) => OpParams ->
                                +                             Tensor v'1 t -- ^ __set1__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
                                +                                          -- Dimension `n` contains values in a set, duplicates are allowed but ignored.
                                +                             -> Tensor v'2 t -- ^ __set2__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
                                +                                             -- Dimension `n` contains values in a set, duplicates are allowed but ignored.
                                +                             -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                                 Tensor Build Data.Int.Int64)
                                +                             -- ^ (__result_indices__, __result_values__, __result_shape__)
                                +                             --
                                +                             -- * __result_indices__: 2D indices of a `SparseTensor`.
                                +                             --
                                +                             -- * __result_values__: 1D values of a `SparseTensor`.
                                +                             --
                                +                             -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
                                +                             -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
                                +                             -- is the max result set size across all `0...n-1` dimensions.
                                +denseToDenseSetOperation' op'options set1 set2 | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1,
                                +                                                             buildInputs set2]
                                +        return (opDef "DenseToDenseSetOperation"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "set1"
                                +  description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "set2"
                                +  description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.\nDimension `n` contains values in a set, duplicates are allowed but ignored."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "result_indices"
                                +  description: "2D indices of a `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "result_values"
                                +  description: "1D values of a `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "result_shape"
                                +  description: "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions."
                                +  type: DT_INT64
                                +}
                                +attr { name: "set_operation" type: "string" }
                                +attr {
                                +  name: "validate_indices" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_STRING
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that yields a SparseTensor for each element of the input.
                                +
                                +denseToSparseBatchDataset :: forall v'1 v'2 v'3 m' . (MonadBuild m') => 
                                +                             [DataType] -- ^ __output_types__
                                +                             -> Tensor v'1 ResourceHandle -- ^ __input_dataset__: A handle to an input dataset. Must have a single component.
                                +                             -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__: A scalar representing the number of elements to accumulate in a
                                +                                                          -- batch.
                                +                             -> Tensor v'3 Data.Int.Int64 -- ^ __row_shape__: A vector representing the dense shape of each row in the produced
                                +                                                          -- SparseTensor.
                                +                             -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +denseToSparseBatchDataset = denseToSparseBatchDataset' id
                                +denseToSparseBatchDataset' :: forall v'1 v'2 v'3 m' . (MonadBuild m') =>
                                +                              OpParams ->
                                +                              [DataType] -- ^ __output_types__
                                +                              -> Tensor v'1 ResourceHandle -- ^ __input_dataset__: A handle to an input dataset. Must have a single component.
                                +                              -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__: A scalar representing the number of elements to accumulate in a
                                +                                                           -- batch.
                                +                              -> Tensor v'3 Data.Int.Int64 -- ^ __row_shape__: A vector representing the dense shape of each row in the produced
                                +                                                           -- SparseTensor.
                                +                              -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +denseToSparseBatchDataset' op'options output_types input_dataset batch_size
                                +                           row_shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs batch_size,
                                +                                                             buildInputs row_shape]
                                +        buildOp [] (opDef "DenseToSparseBatchDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_dataset"
                                +  description: "A handle to an input dataset. Must have a single component."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "batch_size"
                                +  description: "A scalar representing the number of elements to accumulate in a\nbatch."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "row_shape"
                                +  description: "A vector representing the dense shape of each row in the produced\nSparseTensor."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Applies set operation along last dimension of `Tensor` and `SparseTensor`.
                                +--
                                +-- See SetOperationOp::SetOperationFromContext for values of `set_operation`.
                                +-- 
                                +-- Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
                                +-- and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
                                +-- as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
                                +-- ignored.
                                +-- 
                                +-- If `validate_indices` is `True`, this op validates the order and range of `set2`
                                +-- indices.
                                +-- 
                                +-- Output `result` is a `SparseTensor` represented by `result_indices`,
                                +-- `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
                                +-- has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
                                +-- dimension contains the result of `set_operation` applied to the corresponding
                                +-- `[0...n-1]` dimension of `set`.
                                +denseToSparseSetOperation :: forall v'1 v'2 v'3 v'4
                                +                             t . (OneOf '[Data.ByteString.ByteString,
                                +                                          Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16,
                                +                                          Data.Word.Word8] t) => 
                                +                             Tensor v'1 t -- ^ __set1__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
                                +                                          -- Dimension `n` contains values in a set, duplicates are allowed but ignored.
                                +                             -> Tensor v'2 Data.Int.Int64 -- ^ __set2_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
                                +                                                          -- order.
                                +                             -> Tensor v'3 t -- ^ __set2_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
                                +                                             -- order.
                                +                             -> Tensor v'4 Data.Int.Int64 -- ^ __set2_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
                                +                                                          -- be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
                                +                                                          -- max set size across `n-1` dimensions.
                                +                             -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                                 Tensor Build Data.Int.Int64)
                                +                             -- ^ (__result_indices__, __result_values__, __result_shape__)
                                +                             --
                                +                             -- * __result_indices__: 2D indices of a `SparseTensor`.
                                +                             --
                                +                             -- * __result_values__: 1D values of a `SparseTensor`.
                                +                             --
                                +                             -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
                                +                             -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
                                +                             -- is the max result set size across all `0...n-1` dimensions.
                                +denseToSparseSetOperation = denseToSparseSetOperation' id
                                +denseToSparseSetOperation' :: forall v'1 v'2 v'3 v'4
                                +                              t . (OneOf '[Data.ByteString.ByteString,
                                +                                           Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16,
                                +                                           Data.Word.Word8] t) => OpParams ->
                                +                              Tensor v'1 t -- ^ __set1__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
                                +                                           -- Dimension `n` contains values in a set, duplicates are allowed but ignored.
                                +                              -> Tensor v'2 Data.Int.Int64 -- ^ __set2_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
                                +                                                           -- order.
                                +                              -> Tensor v'3 t -- ^ __set2_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
                                +                                              -- order.
                                +                              -> Tensor v'4 Data.Int.Int64 -- ^ __set2_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
                                +                                                           -- be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
                                +                                                           -- max set size across `n-1` dimensions.
                                +                              -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                                  Tensor Build Data.Int.Int64)
                                +                              -- ^ (__result_indices__, __result_values__, __result_shape__)
                                +                              --
                                +                              -- * __result_indices__: 2D indices of a `SparseTensor`.
                                +                              --
                                +                              -- * __result_values__: 1D values of a `SparseTensor`.
                                +                              --
                                +                              -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
                                +                              -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
                                +                              -- is the max result set size across all `0...n-1` dimensions.
                                +denseToSparseSetOperation' op'options set1 set2_indices set2_values
                                +                           set2_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1,
                                +                                                             buildInputs set2_indices,
                                +                                                             buildInputs set2_values,
                                +                                                             buildInputs set2_shape]
                                +        return (opDef "DenseToSparseSetOperation"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "set1"
                                +  description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "set2_indices"
                                +  description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "set2_values"
                                +  description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "set2_shape"
                                +  description: "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the\nmax set size across `n-1` dimensions."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "result_indices"
                                +  description: "2D indices of a `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "result_values"
                                +  description: "1D values of a `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "result_shape"
                                +  description: "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions."
                                +  type: DT_INT64
                                +}
                                +attr { name: "set_operation" type: "string" }
                                +attr {
                                +  name: "validate_indices" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_STRING
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | DepthToSpace for tensors of type T.
                                +--
                                +-- Rearranges data from depth into blocks of spatial data.
                                +-- This is the reverse transformation of SpaceToDepth. More specifically,
                                +-- this op outputs a copy of the input tensor where values from the `depth`
                                +-- dimension are moved in spatial blocks to the `height` and `width` dimensions.
                                +-- The attr `block_size` indicates the input block size and how the data is moved.
                                +-- 
                                +--   * Chunks of data of size `block_size * block_size` from depth are rearranged
                                +--     into non-overlapping blocks of size `block_size x block_size`
                                +--   * The width the output tensor is `input_depth * block_size`, whereas the
                                +--     height is `input_height * block_size`.
                                +--   * The depth of the input tensor must be divisible by
                                +--     `block_size * block_size`.
                                +-- 
                                +-- That is, assuming the input is in the shape:
                                +-- `[batch, height, width, depth]`,
                                +-- the shape of the output will be:
                                +-- `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`
                                +-- 
                                +-- This operation requires that the input tensor be of rank 4, and that
                                +-- `block_size` be >=1 and that `block_size * block_size` be a divisor of the
                                +-- input depth.
                                +-- 
                                +-- This operation is useful for resizing the activations between convolutions
                                +-- (but keeping all data), e.g. instead of pooling. It is also useful for training
                                +-- purely convolutional models.
                                +-- 
                                +-- For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:
                                +-- 
                                +-- ```
                                +-- x = [[[[1, 2, 3, 4]]]]
                                +-- 
                                +-- ```
                                +-- 
                                +-- This operation will output a tensor of shape `[1, 2, 2, 1]`:
                                +-- 
                                +-- ```
                                +--    [[[[1], [2]],
                                +--      [[3], [4]]]]
                                +-- ```
                                +-- 
                                +-- Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
                                +-- the corresponding output will have 2x2 elements and will have a depth of
                                +-- 1 channel (1 = `4 / (block_size * block_size)`).
                                +-- The output element shape is `[2, 2, 1]`.
                                +-- 
                                +-- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
                                +-- 
                                +-- ```
                                +-- x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
                                +-- ```
                                +-- 
                                +-- This operation, for block size of 2, will return the following tensor of shape
                                +-- `[1, 2, 2, 3]`
                                +-- 
                                +-- ```
                                +--    [[[[1, 2, 3], [4, 5, 6]],
                                +--      [[7, 8, 9], [10, 11, 12]]]]
                                +-- 
                                +-- ```
                                +-- 
                                +-- Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
                                +-- 
                                +-- ```
                                +-- x =  [[[[1, 2, 3, 4],
                                +--        [5, 6, 7, 8]],
                                +--       [[9, 10, 11, 12],
                                +--        [13, 14, 15, 16]]]]
                                +-- ```
                                +-- 
                                +-- the operator will return the following tensor of shape `[1 4 4 1]`:
                                +-- 
                                +-- ```
                                +-- x = [[ [1],   [2],  [5],  [6]],
                                +--      [ [3],   [4],  [7],  [8]],
                                +--      [ [9],  [10], [13],  [14]],
                                +--      [ [11], [12], [15],  [16]]]
                                +-- 
                                +-- ```
                                +depthToSpace :: forall v'1 t . (TensorType t) => 
                                +                Data.Int.Int64 -- ^ __block_size__: The size of the spatial block, same as in Space2Depth.
                                +                -> Tensor v'1 t -- ^ __input__
                                +                -> Tensor Build t -- ^ __output__
                                +depthToSpace = depthToSpace' id
                                +depthToSpace' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                 Data.Int.Int64 -- ^ __block_size__: The size of the spatial block, same as in Space2Depth.
                                +                 -> Tensor v'1 t -- ^ __input__
                                +                 -> Tensor Build t -- ^ __output__
                                +depthToSpace' op'options block_size input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "DepthToSpace"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "block_size" .~ block_size
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "block_size"
                                +  type: "int"
                                +  description: "The size of the spatial block, same as in Space2Depth."
                                +  has_minimum: true
                                +  minimum: 2
                                +}
                                +-}
                                +
                                +-- | Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
                                +--
                                +-- Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
                                +-- and a filter / kernel tensor of shape
                                +-- `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
                                +-- `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
                                +-- a different filter to each input channel (expanding from 1 channel to
                                +-- `channel_multiplier` channels for each), then concatenates the results
                                +-- together. Thus, the output has `in_channels * channel_multiplier` channels.
                                +-- 
                                +-- ```
                                +-- for k in 0..in_channels-1
                                +--   for q in 0..channel_multiplier-1
                                +--     output[b, i, j, k * channel_multiplier + q] =
                                +--       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                                +--                         filter[di, dj, k, q]
                                +-- ```
                                +-- 
                                +-- Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
                                +-- horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
                                +depthwiseConv2dNative :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +                         Tensor v'1 t -- ^ __input__
                                +                         -> Tensor v'2 t -- ^ __filter__
                                +                         -> Tensor Build t -- ^ __output__
                                +depthwiseConv2dNative = depthwiseConv2dNative' id
                                +depthwiseConv2dNative' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 t -- ^ __input__
                                +                          -> Tensor v'2 t -- ^ __filter__
                                +                          -> Tensor Build t -- ^ __output__
                                +depthwiseConv2dNative' op'options input filter | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter]
                                +        return (opDef "DepthwiseConv2dNative"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg { name: "filter" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`."
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, channels, height, width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradients of depthwise convolution with respect to the filter.
                                +
                                +depthwiseConv2dNativeBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Double,
                                +                                                                       Float] t) =>
                                +                                       
                                +                                       Tensor v'1 t -- ^ __input__: 4-D with shape based on `data_format`.  For example, if
                                +                                                    -- `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
                                +                                                    -- in_width, in_channels]` tensor.
                                +                                       -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
                                +                                                                    -- where `filter` is a 4-D
                                +                                                                    -- `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
                                +                                       -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape  based on `data_format`.
                                +                                                       -- For example, if `data_format` is 'NHWC' then
                                +                                                       -- out_backprop shape is `[batch, out_height, out_width, out_channels]`.
                                +                                                       -- Gradients w.r.t. the output of the convolution.
                                +                                       -> Tensor Build t -- ^ __output__: 4-D with shape
                                +                                       -- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
                                +                                       -- the `filter` input of the convolution.
                                +depthwiseConv2dNativeBackpropFilter = depthwiseConv2dNativeBackpropFilter' id
                                +depthwiseConv2dNativeBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Double,
                                +                                                                        Float] t) =>
                                +                                        OpParams ->
                                +                                        Tensor v'1 t -- ^ __input__: 4-D with shape based on `data_format`.  For example, if
                                +                                                     -- `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
                                +                                                     -- in_width, in_channels]` tensor.
                                +                                        -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
                                +                                                                     -- where `filter` is a 4-D
                                +                                                                     -- `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
                                +                                        -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape  based on `data_format`.
                                +                                                        -- For example, if `data_format` is 'NHWC' then
                                +                                                        -- out_backprop shape is `[batch, out_height, out_width, out_channels]`.
                                +                                                        -- Gradients w.r.t. the output of the convolution.
                                +                                        -> Tensor Build t -- ^ __output__: 4-D with shape
                                +                                        -- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
                                +                                        -- the `filter` input of the convolution.
                                +depthwiseConv2dNativeBackpropFilter' op'options input filter_sizes
                                +                                     out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter_sizes,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "DepthwiseConv2dNativeBackpropFilter"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape based on `data_format`.  For example, if\n`data_format` is \'NHWC\' then `input` is a 4-D `[batch, in_height,\nin_width, in_channels]` tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter_sizes"
                                +  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "4-D with shape  based on `data_format`.\nFor example, if `data_format` is \'NHWC\' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input\nof the convolution."
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, channels, height, width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradients of depthwise convolution with respect to the input.
                                +
                                +depthwiseConv2dNativeBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Double,
                                +                                                                      Float] t) =>
                                +                                      
                                +                                      Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`, based
                                +                                                                -- on `data_format`.  For example, if `data_format` is 'NHWC' then
                                +                                                                --  `input` is a 4-D `[batch, height, width, channels]` tensor.
                                +                                      -> Tensor v'2 t -- ^ __filter__: 4-D with shape
                                +                                                      -- `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
                                +                                      -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape  based on `data_format`.
                                +                                                      -- For example, if `data_format` is 'NHWC' then
                                +                                                      -- out_backprop shape is `[batch, out_height, out_width, out_channels]`.
                                +                                                      -- Gradients w.r.t. the output of the convolution.
                                +                                      -> Tensor Build t -- ^ __output__: 4-D with shape according to `data_format`.  For example, if
                                +                                      -- `data_format` is 'NHWC', output shape is `[batch, in_height,
                                +                                      -- in_width, in_channels]`.  Gradient w.r.t. the input of the
                                +                                      -- convolution.
                                +depthwiseConv2dNativeBackpropInput = depthwiseConv2dNativeBackpropInput' id
                                +depthwiseConv2dNativeBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Double,
                                +                                                                       Float] t) =>
                                +                                       OpParams ->
                                +                                       Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`, based
                                +                                                                 -- on `data_format`.  For example, if `data_format` is 'NHWC' then
                                +                                                                 --  `input` is a 4-D `[batch, height, width, channels]` tensor.
                                +                                       -> Tensor v'2 t -- ^ __filter__: 4-D with shape
                                +                                                       -- `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
                                +                                       -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape  based on `data_format`.
                                +                                                       -- For example, if `data_format` is 'NHWC' then
                                +                                                       -- out_backprop shape is `[batch, out_height, out_width, out_channels]`.
                                +                                                       -- Gradients w.r.t. the output of the convolution.
                                +                                       -> Tensor Build t -- ^ __output__: 4-D with shape according to `data_format`.  For example, if
                                +                                       -- `data_format` is 'NHWC', output shape is `[batch, in_height,
                                +                                       -- in_width, in_channels]`.  Gradient w.r.t. the input of the
                                +                                       -- convolution.
                                +depthwiseConv2dNativeBackpropInput' op'options input_sizes filter
                                +                                    out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes,
                                +                                                             buildInputs filter,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "DepthwiseConv2dNativeBackpropInput"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_sizes"
                                +  description: "An integer vector representing the shape of `input`, based\non `data_format`.  For example, if `data_format` is \'NHWC\' then\n `input` is a 4-D `[batch, height, width, channels]` tensor."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, depthwise_multiplier]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "4-D with shape  based on `data_format`.\nFor example, if `data_format` is \'NHWC\' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with shape according to `data_format`.  For example, if\n`data_format` is \'NHWC\', output shape is `[batch, in_height,\nin_width, in_channels]`.  Gradient w.r.t. the input of the\nconvolution."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input\nof the convolution."
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, channels, height, width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | Dequantize the 'input' tensor into a float Tensor.
                                +--
                                +-- [min_range, max_range] are scalar floats that specify the range for
                                +-- the 'input' data. The 'mode' attribute controls exactly which calculations are
                                +-- used to convert the float values to their quantized equivalents.
                                +-- 
                                +-- In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
                                +-- 
                                +-- ```
                                +-- if T == qint8, in[i] += (range(T) + 1)/ 2.0
                                +-- out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
                                +-- ```
                                +-- here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
                                +-- 
                                +-- *MIN_COMBINED Mode Example*
                                +-- 
                                +-- If the input comes from a QuantizedRelu6, the output type is
                                +-- quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
                                +-- 0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
                                +-- Dequantize on quint8 will take each value, cast to float, and multiply
                                +-- by 6 / 255.
                                +-- Note that if quantizedtype is qint8, the operation will additionally add
                                +-- each value by 128 prior to casting.
                                +-- 
                                +-- If the mode is 'MIN_FIRST', then this approach is used:
                                +-- 
                                +-- ```c++
                                +-- number_of_steps = 1 << (# of bits in T)
                                +-- range_adjust = number_of_steps / (number_of_steps - 1)
                                +-- range = (range_max - range_min) * range_adjust
                                +-- range_scale = range / number_of_steps
                                +-- const double offset_input = static_cast<double>(input) - lowest_quantized;
                                +-- result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
                                +-- ```
                                +dequantize :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                              Data.Word.Word16,
                                +                                              Data.Word.Word8] t) => 
                                +              Tensor v'1 t -- ^ __input__
                                +              -> Tensor v'2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input.
                                +              -> Tensor v'3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input.
                                +              -> Tensor Build Float -- ^ __output__
                                +dequantize = dequantize' id
                                +dequantize' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8] t) =>
                                +               OpParams ->
                                +               Tensor v'1 t -- ^ __input__
                                +               -> Tensor v'2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input.
                                +               -> Tensor v'3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input.
                                +               -> Tensor Build Float -- ^ __output__
                                +dequantize' op'options input min_range max_range | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs min_range,
                                +                                                             buildInputs max_range]
                                +        return (opDef "Dequantize"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg {
                                +  name: "min_range"
                                +  description: "The minimum scalar value possibly produced for the input."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_range"
                                +  description: "The maximum scalar value possibly produced for the input."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "output" type: DT_FLOAT }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "mode"
                                +  type: "string"
                                +  default_value { s: "MIN_COMBINED" }
                                +  allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" } }
                                +}
                                +-}
                                +
                                +-- | Deserialize and concatenate `SparseTensors` from a serialized minibatch.
                                +--
                                +-- The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
                                +-- `N` is the minibatch size and the rows correspond to packed outputs of
                                +-- `SerializeSparse`.  The ranks of the original `SparseTensor` objects
                                +-- must all match.  When the final `SparseTensor` is created, it has rank one
                                +-- higher than the ranks of the incoming `SparseTensor` objects
                                +-- (they have been concatenated along a new row dimension).
                                +-- 
                                +-- The output `SparseTensor` object's shape values for all dimensions but the
                                +-- first are the max across the input `SparseTensor` objects' shape values
                                +-- for the corresponding dimensions.  Its first shape value is `N`, the minibatch
                                +-- size.
                                +-- 
                                +-- The input `SparseTensor` objects' indices are assumed ordered in
                                +-- standard lexicographic order.  If this is not the case, after this
                                +-- step run `SparseReorder` to restore index ordering.
                                +-- 
                                +-- For example, if the serialized input is a `[2 x 3]` matrix representing two
                                +-- original `SparseTensor` objects:
                                +-- 
                                +--     index = [ 0]
                                +--             [10]
                                +--             [20]
                                +--     values = [1, 2, 3]
                                +--     shape = [50]
                                +-- 
                                +-- and
                                +-- 
                                +--     index = [ 2]
                                +--             [10]
                                +--     values = [4, 5]
                                +--     shape = [30]
                                +-- 
                                +-- then the final deserialized `SparseTensor` will be:
                                +-- 
                                +--     index = [0  0]
                                +--             [0 10]
                                +--             [0 20]
                                +--             [1  2]
                                +--             [1 10]
                                +--     values = [1, 2, 3, 4, 5]
                                +--     shape = [2 50]
                                +deserializeManySparse :: forall v'1 dtype . (TensorType dtype) => 
                                +                         Tensor v'1 Data.ByteString.ByteString -- ^ __serialized_sparse__: 2-D, The `N` serialized `SparseTensor` objects.
                                +                                                               -- Must have 3 columns.
                                +                         -> (Tensor Build Data.Int.Int64, Tensor Build dtype,
                                +                             Tensor Build Data.Int.Int64)
                                +                         -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)
                                +                         --
                                +                         -- * __sparse_indices__
                                +                         --
                                +                         -- * __sparse_values__
                                +                         --
                                +                         -- * __sparse_shape__
                                +deserializeManySparse = deserializeManySparse' id
                                +deserializeManySparse' :: forall v'1 dtype . (TensorType dtype) => OpParams ->
                                +                          Tensor v'1 Data.ByteString.ByteString -- ^ __serialized_sparse__: 2-D, The `N` serialized `SparseTensor` objects.
                                +                                                                -- Must have 3 columns.
                                +                          -> (Tensor Build Data.Int.Int64, Tensor Build dtype,
                                +                              Tensor Build Data.Int.Int64)
                                +                          -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)
                                +                          --
                                +                          -- * __sparse_indices__
                                +                          --
                                +                          -- * __sparse_values__
                                +                          --
                                +                          -- * __sparse_shape__
                                +deserializeManySparse' op'options serialized_sparse | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized_sparse]
                                +        return (opDef "DeserializeManySparse"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "serialized_sparse"
                                +  description: "2-D, The `N` serialized `SparseTensor` objects.\nMust have 3 columns."
                                +  type: DT_STRING
                                +}
                                +output_arg { name: "sparse_indices" type: DT_INT64 }
                                +output_arg { name: "sparse_values" type_attr: "dtype" }
                                +output_arg { name: "sparse_shape" type: DT_INT64 }
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The `dtype` of the serialized `SparseTensor` objects."
                                +}
                                +-}
                                +
                                +-- | Deletes the resource specified by the handle.
                                +--
                                +-- All subsequent operations using the resource will result in a NotFound
                                +-- error status.
                                +destroyResourceOp :: forall v'1 m' . (MonadBuild m') => 
                                +                     Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource to delete.
                                +                     -> m' (ControlNode)
                                +destroyResourceOp = destroyResourceOp' id
                                +destroyResourceOp' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource to delete.
                                +                      -> m' (ControlNode)
                                +destroyResourceOp' op'options resource | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource]
                                +        buildOp [] (opDef "DestroyResourceOp"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource"
                                +  description: "handle to the resource to delete."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "ignore_lookup_error"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "whether to ignore the error when the resource\ndoesn\'t exist."
                                +}
                                +-}
                                +
                                +-- | Destroys the temporary variable and returns its final value.
                                +--
                                +-- Sets output to the value of the Tensor pointed to by 'ref', then destroys
                                +-- the temporary variable called 'var_name'.
                                +-- All other uses of 'ref' *must* have executed before this op.
                                +-- This is typically achieved by chaining the ref through each assign op, or by
                                +-- using control dependencies.
                                +-- 
                                +-- Outputs the final value of the tensor pointed to by 'ref'.
                                +destroyTemporaryVariable :: forall t m' . (MonadBuild m', TensorType t) => 
                                +                            Tensor Ref t -- ^ __ref__: A reference to the temporary variable tensor.
                                +                            -> m' (Tensor Value t) -- ^ __value__
                                +destroyTemporaryVariable = destroyTemporaryVariable' id
                                +destroyTemporaryVariable' :: forall t m' . (MonadBuild m', TensorType t) =>
                                +                             OpParams ->
                                +                             Tensor Ref t -- ^ __ref__: A reference to the temporary variable tensor.
                                +                             -> m' (Tensor Value t) -- ^ __value__
                                +destroyTemporaryVariable' op'options ref | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref]
                                +        buildOp [] (opDef "DestroyTemporaryVariable"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "A reference to the temporary variable tensor."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +output_arg { name: "value" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "var_name"
                                +  type: "string"
                                +  description: "Name of the temporary variable, usually the name of the matching\n\'TemporaryVariable\' op."
                                +}
                                +-}
                                +
                                +-- | Returns a diagonal tensor with a given diagonal values.
                                +--
                                +-- Given a `diagonal`, this operation returns a tensor with the `diagonal` and
                                +-- everything else padded with zeros. The diagonal is computed as follows:
                                +-- 
                                +-- Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
                                +-- rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
                                +-- 
                                +-- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'diagonal' is [1, 2, 3, 4]
                                +-- tf.diag(diagonal) ==> [[1, 0, 0, 0]
                                +--                        [0, 2, 0, 0]
                                +--                        [0, 0, 3, 0]
                                +--                        [0, 0, 0, 4]]
                                +-- ```
                                +diag :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                Data.Int.Int64, Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __diagonal__: Rank k tensor where k is at most 3.
                                +        -> Tensor Build t -- ^ __output__
                                +diag = diag' id
                                +diag' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                 Data.Int.Int64, Double, Float] t) =>
                                +         OpParams ->
                                +         Tensor v'1 t -- ^ __diagonal__: Rank k tensor where k is at most 3.
                                +         -> Tensor Build t -- ^ __output__
                                +diag' op'options diagonal | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal]
                                +        return (opDef "Diag"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "diagonal"
                                +  description: "Rank k tensor where k is at most 3."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the diagonal part of the tensor.
                                +--
                                +-- This operation returns a tensor with the `diagonal` part
                                +-- of the `input`. The `diagonal` part is computed as follows:
                                +-- 
                                +-- Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
                                +-- tensor of rank `k` with dimensions `[D1,..., Dk]` where:
                                +-- 
                                +-- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'input' is [[1, 0, 0, 0]
                                +--               [0, 2, 0, 0]
                                +--               [0, 0, 3, 0]
                                +--               [0, 0, 0, 4]]
                                +-- 
                                +-- tf.diag_part(input) ==> [1, 2, 3, 4]
                                +-- ```
                                +diagPart :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int32, Data.Int.Int64, Double,
                                +                                    Float] t) => 
                                +            Tensor v'1 t -- ^ __input__: Rank k tensor where k is 2, 4, or 6.
                                +            -> Tensor Build t -- ^ __diagonal__: The extracted diagonal.
                                +diagPart = diagPart' id
                                +diagPart' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int32, Data.Int.Int64, Double,
                                +                                     Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __input__: Rank k tensor where k is 2, 4, or 6.
                                +             -> Tensor Build t -- ^ __diagonal__: The extracted diagonal.
                                +diagPart' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "DiagPart"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Rank k tensor where k is 2, 4, or 6."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "diagonal"
                                +  description: "The extracted diagonal."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes Psi, the derivative of Lgamma (the log of the absolute value of
                                +--
                                +-- `Gamma(x)`), element-wise.
                                +digamma :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor Build t -- ^ __y__
                                +digamma = digamma' id
                                +digamma' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor Build t -- ^ __y__
                                +digamma' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Digamma"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
                                +--
                                +-- The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
                                +-- `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
                                +-- input channel is processed independently of the others with its own structuring
                                +-- function. The `output` tensor has shape
                                +-- `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
                                +-- tensor depend on the `padding` algorithm. We currently only support the default
                                +-- "NHWC" `data_format`.
                                +-- 
                                +-- In detail, the grayscale morphological 2-D dilation is the max-sum correlation
                                +-- (for consistency with `conv2d`, we use unmirrored filters):
                                +-- 
                                +--     output[b, y, x, c] =
                                +--        max_{dy, dx} input[b,
                                +--                           strides[1] * y + rates[1] * dy,
                                +--                           strides[2] * x + rates[2] * dx,
                                +--                           c] +
                                +--                     filter[dy, dx, c]
                                +-- 
                                +-- Max-pooling is a special case when the filter has size equal to the pooling
                                +-- kernel size and contains all zeros.
                                +-- 
                                +-- Note on duality: The dilation of `input` by the `filter` is equal to the
                                +-- negation of the erosion of `-input` by the reflected `filter`.
                                +dilation2D :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t) => 
                                +              Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
                                +              -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
                                +              -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, out_height, out_width, depth]`.
                                +dilation2D = dilation2D' id
                                +dilation2D' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16, Data.Word.Word8,
                                +                                           Double, Float] t) => OpParams ->
                                +               Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
                                +               -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
                                +               -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, out_height, out_width, depth]`.
                                +dilation2D' op'options input filter | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter]
                                +        return (opDef "Dilation2D"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, in_height, in_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "3-D with shape `[filter_height, filter_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with shape `[batch, out_height, out_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input\ntensor. Must be: `[1, stride_height, stride_width, 1]`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "rates"
                                +  type: "list(int)"
                                +  description: "The input stride for atrous morphological dilation. Must be:\n`[1, rate_height, rate_width, 1]`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient of morphological 2-D dilation with respect to the filter.
                                +
                                +dilation2DBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                            Data.Int.Int32,
                                +                                                            Data.Int.Int64,
                                +                                                            Data.Int.Int8,
                                +                                                            Data.Word.Word16,
                                +                                                            Data.Word.Word8,
                                +                                                            Double, Float] t) =>
                                +                            
                                +                            Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
                                +                            -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
                                +                            -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.
                                +                            -> Tensor Build t -- ^ __filter_backprop__: 3-D with shape `[filter_height, filter_width, depth]`.
                                +dilation2DBackpropFilter = dilation2DBackpropFilter' id
                                +dilation2DBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                             Data.Int.Int32,
                                +                                                             Data.Int.Int64,
                                +                                                             Data.Int.Int8,
                                +                                                             Data.Word.Word16,
                                +                                                             Data.Word.Word8,
                                +                                                             Double,
                                +                                                             Float] t) =>
                                +                             OpParams ->
                                +                             Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
                                +                             -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
                                +                             -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.
                                +                             -> Tensor Build t -- ^ __filter_backprop__: 3-D with shape `[filter_height, filter_width, depth]`.
                                +dilation2DBackpropFilter' op'options input filter
                                +                          out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "Dilation2DBackpropFilter"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, in_height, in_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "3-D with shape `[filter_height, filter_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "4-D with shape `[batch, out_height, out_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "filter_backprop"
                                +  description: "3-D with shape `[filter_height, filter_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "rates"
                                +  type: "list(int)"
                                +  description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient of morphological 2-D dilation with respect to the input.
                                +
                                +dilation2DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                           Data.Int.Int32,
                                +                                                           Data.Int.Int64,
                                +                                                           Data.Int.Int8,
                                +                                                           Data.Word.Word16,
                                +                                                           Data.Word.Word8,
                                +                                                           Double, Float] t) => 
                                +                           Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
                                +                           -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
                                +                           -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.
                                +                           -> Tensor Build t -- ^ __in_backprop__: 4-D with shape `[batch, in_height, in_width, depth]`.
                                +dilation2DBackpropInput = dilation2DBackpropInput' id
                                +dilation2DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                            Data.Int.Int32,
                                +                                                            Data.Int.Int64,
                                +                                                            Data.Int.Int8,
                                +                                                            Data.Word.Word16,
                                +                                                            Data.Word.Word8,
                                +                                                            Double, Float] t) =>
                                +                            OpParams ->
                                +                            Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
                                +                            -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
                                +                            -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.
                                +                            -> Tensor Build t -- ^ __in_backprop__: 4-D with shape `[batch, in_height, in_width, depth]`.
                                +dilation2DBackpropInput' op'options input filter
                                +                         out_backprop | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter,
                                +                                                             buildInputs out_backprop]
                                +        return (opDef "Dilation2DBackpropInput"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, in_height, in_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "3-D with shape `[filter_height, filter_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "4-D with shape `[batch, out_height, out_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "in_backprop"
                                +  description: "4-D with shape `[batch, in_height, in_width, depth]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "rates"
                                +  type: "list(int)"
                                +  description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Returns x / y element-wise.
                                +--
                                +-- *NOTE*: `Div` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +div :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                   Data.Int.Int32, Data.Int.Int64,
                                +                                   Data.Int.Int8, Data.Word.Word16,
                                +                                   Data.Word.Word8, Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor v'2 t -- ^ __y__
                                +       -> Tensor Build t -- ^ __z__
                                +div = div' id
                                +div' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor v'2 t -- ^ __y__
                                +        -> Tensor Build t -- ^ __z__
                                +div' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Div"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Draw bounding boxes on a batch of images.
                                +--
                                +-- Outputs a copy of `images` but draws on top of the pixels zero or more bounding
                                +-- boxes specified by the locations in `boxes`. The coordinates of the each
                                +-- bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
                                +-- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
                                +-- height of the underlying image.
                                +-- 
                                +-- For example, if an image is 100 x 200 pixels and the bounding box is
                                +-- `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the
                                +-- bounding box will be `(10, 40)` to `(50, 180)`.
                                +-- 
                                +-- Parts of the bounding box may fall outside the image.
                                +drawBoundingBoxes :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) => 
                                +                     Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, depth]`. A batch of images.
                                +                     -> Tensor v'2 Float -- ^ __boxes__: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
                                +                                         -- boxes.
                                +                     -> Tensor Build t -- ^ __output__: 4-D with the same shape as `images`. The batch of input images with
                                +                     -- bounding boxes drawn on the images.
                                +drawBoundingBoxes = drawBoundingBoxes' id
                                +drawBoundingBoxes' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) =>
                                +                      OpParams ->
                                +                      Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, depth]`. A batch of images.
                                +                      -> Tensor v'2 Float -- ^ __boxes__: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
                                +                                          -- boxes.
                                +                      -> Tensor Build t -- ^ __output__: 4-D with the same shape as `images`. The batch of input images with
                                +                      -- bounding boxes drawn on the images.
                                +drawBoundingBoxes' op'options images boxes | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs boxes]
                                +        return (opDef "DrawBoundingBoxes"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "4-D with shape `[batch, height, width, depth]`. A batch of images."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "boxes"
                                +  description: "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
                                +}
                                +-}
                                +
                                +-- | Partitions `data` into `num_partitions` tensors using indices from `partitions`.
                                +--
                                +-- For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
                                +-- becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
                                +-- are placed in `outputs[i]` in lexicographic order of `js`, and the first
                                +-- dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
                                +-- In detail,
                                +-- 
                                +-- ```python
                                +--     outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
                                +-- 
                                +--     outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
                                +-- ```
                                +-- 
                                +-- `data.shape` must start with `partitions.shape`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```python
                                +--     # Scalar partitions.
                                +--     partitions = 1
                                +--     num_partitions = 2
                                +--     data = [10, 20]
                                +--     outputs[0] = []  # Empty with shape [0, 2]
                                +--     outputs[1] = [[10, 20]]
                                +-- 
                                +--     # Vector partitions.
                                +--     partitions = [0, 0, 1, 1, 0]
                                +--     num_partitions = 2
                                +--     data = [10, 20, 30, 40, 50]
                                +--     outputs[0] = [10, 20, 50]
                                +--     outputs[1] = [30, 40]
                                +-- ```
                                +-- 
                                +-- See `dynamic_stitch` for an example on how to merge partitions back.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
                                +-- </div>
                                +dynamicPartition :: forall v'1 v'2 t . (TensorType t) => 
                                +                    Data.Int.Int64 -- ^ __num_partitions__: The number of partitions to output.
                                +                    -> Tensor v'1 t -- ^ __data__
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __partitions__: Any shape.  Indices in the range `[0, num_partitions)`.
                                +                    -> [Tensor Build t] -- ^ __outputs__
                                +dynamicPartition = dynamicPartition' id
                                +dynamicPartition' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +                     Data.Int.Int64 -- ^ __num_partitions__: The number of partitions to output.
                                +                     -> Tensor v'1 t -- ^ __data__
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __partitions__: Any shape.  Indices in the range `[0, num_partitions)`.
                                +                     -> [Tensor Build t] -- ^ __outputs__
                                +dynamicPartition' op'options num_partitions data'
                                +                  partitions | eqLengthGuard [] =
                                +    pureOp [num_partitions] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs partitions]
                                +        return (opDef "DynamicPartition"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "num_partitions" .~ num_partitions
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "partitions"
                                +  description: "Any shape.  Indices in the range `[0, num_partitions)`."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "outputs" type_attr: "T" number_attr: "num_partitions"
                                +}
                                +attr {
                                +  name: "num_partitions"
                                +  type: "int"
                                +  description: "The number of partitions to output."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Interleave the values from the `data` tensors into a single tensor.
                                +--
                                +-- Builds a merged tensor such that
                                +-- 
                                +-- ```python
                                +--     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
                                +-- ```
                                +-- 
                                +-- For example, if each `indices[m]` is scalar or vector, we have
                                +-- 
                                +-- ```python
                                +--     # Scalar indices:
                                +--     merged[indices[m], ...] = data[m][...]
                                +-- 
                                +--     # Vector indices:
                                +--     merged[indices[m][i], ...] = data[m][i, ...]
                                +-- ```
                                +-- 
                                +-- Each `data[i].shape` must start with the corresponding `indices[i].shape`,
                                +-- and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
                                +-- must have `data[i].shape = indices[i].shape + constant`.  In terms of this
                                +-- `constant`, the output shape is
                                +-- 
                                +--     merged.shape = [max(indices)] + constant
                                +-- 
                                +-- Values are merged in order, so if an index appears in both `indices[m][i]` and
                                +-- `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
                                +-- merged result.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```python
                                +--     indices[0] = 6
                                +--     indices[1] = [4, 1]
                                +--     indices[2] = [[5, 2], [0, 3]]
                                +--     data[0] = [61, 62]
                                +--     data[1] = [[41, 42], [11, 12]]
                                +--     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
                                +--     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
                                +--               [51, 52], [61, 62]]
                                +-- ```
                                +-- 
                                +-- This method can be used to merge partitions created by `dynamic_partition`
                                +-- as illustrated on the following example:
                                +-- 
                                +-- ```python
                                +--     # Apply function (increments x_i) on elements for which a certain condition
                                +--     # apply (x_i != -1 in this example).
                                +--     x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
                                +--     condition_mask=tf.not_equal(x,tf.constant(-1.))
                                +--     partitioned_data = tf.dynamic_partition(
                                +--         x, tf.cast(condition_mask, tf.int32) , 2)
                                +--     partitioned_data[1] = partitioned_data[1] + 1.0
                                +--     condition_indices = tf.dynamic_partition(
                                +--         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
                                +--     x = tf.dynamic_stitch(condition_indices, partitioned_data)
                                +--     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
                                +--     # unchanged.
                                +-- ```
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
                                +-- </div>
                                +dynamicStitch :: forall v'1 v'2 t . (TensorType t) => 
                                +                 [Tensor v'1 Data.Int.Int32] -- ^ __indices__
                                +                 -> [Tensor v'2 t] -- ^ __data__
                                +                 -> Tensor Build t -- ^ __merged__
                                +dynamicStitch = dynamicStitch' id
                                +dynamicStitch' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +                  [Tensor v'1 Data.Int.Int32] -- ^ __indices__
                                +                  -> [Tensor v'2 t] -- ^ __data__
                                +                  -> Tensor Build t -- ^ __merged__
                                +dynamicStitch' op'options indices
                                +               data' | eqLengthGuard [("N", [("indices", length indices),
                                +                                             ("data", length data')])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices,
                                +                                                             buildInputs data']
                                +        return (opDef "DynamicStitch"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length indices) :: Int64
                                +{-
                                +input_arg { name: "indices" type: DT_INT32 number_attr: "N" }
                                +input_arg { name: "data" type_attr: "T" number_attr: "N" }
                                +output_arg { name: "merged" type_attr: "T" }
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes the (possibly normalized) Levenshtein Edit Distance.
                                +--
                                +-- The inputs are variable-length sequences provided by SparseTensors
                                +--   (hypothesis_indices, hypothesis_values, hypothesis_shape)
                                +-- and
                                +--   (truth_indices, truth_values, truth_shape).
                                +-- 
                                +-- The inputs are:
                                +editDistance :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (TensorType t) => 
                                +                Tensor v'1 Data.Int.Int64 -- ^ __hypothesis_indices__: The indices of the hypothesis list SparseTensor.
                                +                                          -- This is an N x R int64 matrix.
                                +                -> Tensor v'2 t -- ^ __hypothesis_values__: The values of the hypothesis list SparseTensor.
                                +                                -- This is an N-length vector.
                                +                -> Tensor v'3 Data.Int.Int64 -- ^ __hypothesis_shape__: The shape of the hypothesis list SparseTensor.
                                +                                             -- This is an R-length vector.
                                +                -> Tensor v'4 Data.Int.Int64 -- ^ __truth_indices__: The indices of the truth list SparseTensor.
                                +                                             -- This is an M x R int64 matrix.
                                +                -> Tensor v'5 t -- ^ __truth_values__: The values of the truth list SparseTensor.
                                +                                -- This is an M-length vector.
                                +                -> Tensor v'6 Data.Int.Int64 -- ^ __truth_shape__: truth indices, vector.
                                +                -> Tensor Build Float -- ^ __output__: A dense float tensor with rank R - 1.
                                +                -- 
                                +                -- For the example input:
                                +                -- 
                                +                --     // hypothesis represents a 2x1 matrix with variable-length values:
                                +                --     //   (0,0) = ["a"]
                                +                --     //   (1,0) = ["b"]
                                +                --     hypothesis_indices = [[0, 0, 0],
                                +                --                           [1, 0, 0]]
                                +                --     hypothesis_values = ["a", "b"]
                                +                --     hypothesis_shape = [2, 1, 1]
                                +                -- 
                                +                --     // truth represents a 2x2 matrix with variable-length values:
                                +                --     //   (0,0) = []
                                +                --     //   (0,1) = ["a"]
                                +                --     //   (1,0) = ["b", "c"]
                                +                --     //   (1,1) = ["a"]
                                +                --     truth_indices = [[0, 1, 0],
                                +                --                      [1, 0, 0],
                                +                --                      [1, 0, 1],
                                +                --                      [1, 1, 0]]
                                +                --     truth_values = ["a", "b", "c", "a"]
                                +                --     truth_shape = [2, 2, 2]
                                +                --     normalize = true
                                +                -- 
                                +                -- The output will be:
                                +                -- 
                                +                --     // output is a 2x2 matrix with edit distances normalized by truth lengths.
                                +                --     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
                                +                --               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
                                +editDistance = editDistance' id
                                +editDistance' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (TensorType t) =>
                                +                 OpParams ->
                                +                 Tensor v'1 Data.Int.Int64 -- ^ __hypothesis_indices__: The indices of the hypothesis list SparseTensor.
                                +                                           -- This is an N x R int64 matrix.
                                +                 -> Tensor v'2 t -- ^ __hypothesis_values__: The values of the hypothesis list SparseTensor.
                                +                                 -- This is an N-length vector.
                                +                 -> Tensor v'3 Data.Int.Int64 -- ^ __hypothesis_shape__: The shape of the hypothesis list SparseTensor.
                                +                                              -- This is an R-length vector.
                                +                 -> Tensor v'4 Data.Int.Int64 -- ^ __truth_indices__: The indices of the truth list SparseTensor.
                                +                                              -- This is an M x R int64 matrix.
                                +                 -> Tensor v'5 t -- ^ __truth_values__: The values of the truth list SparseTensor.
                                +                                 -- This is an M-length vector.
                                +                 -> Tensor v'6 Data.Int.Int64 -- ^ __truth_shape__: truth indices, vector.
                                +                 -> Tensor Build Float -- ^ __output__: A dense float tensor with rank R - 1.
                                +                 -- 
                                +                 -- For the example input:
                                +                 -- 
                                +                 --     // hypothesis represents a 2x1 matrix with variable-length values:
                                +                 --     //   (0,0) = ["a"]
                                +                 --     //   (1,0) = ["b"]
                                +                 --     hypothesis_indices = [[0, 0, 0],
                                +                 --                           [1, 0, 0]]
                                +                 --     hypothesis_values = ["a", "b"]
                                +                 --     hypothesis_shape = [2, 1, 1]
                                +                 -- 
                                +                 --     // truth represents a 2x2 matrix with variable-length values:
                                +                 --     //   (0,0) = []
                                +                 --     //   (0,1) = ["a"]
                                +                 --     //   (1,0) = ["b", "c"]
                                +                 --     //   (1,1) = ["a"]
                                +                 --     truth_indices = [[0, 1, 0],
                                +                 --                      [1, 0, 0],
                                +                 --                      [1, 0, 1],
                                +                 --                      [1, 1, 0]]
                                +                 --     truth_values = ["a", "b", "c", "a"]
                                +                 --     truth_shape = [2, 2, 2]
                                +                 --     normalize = true
                                +                 -- 
                                +                 -- The output will be:
                                +                 -- 
                                +                 --     // output is a 2x2 matrix with edit distances normalized by truth lengths.
                                +                 --     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
                                +                 --               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
                                +editDistance' op'options hypothesis_indices hypothesis_values hypothesis_shape
                                +              truth_indices truth_values truth_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs hypothesis_indices,
                                +                                                             buildInputs hypothesis_values,
                                +                                                             buildInputs hypothesis_shape,
                                +                                                             buildInputs truth_indices,
                                +                                                             buildInputs truth_values,
                                +                                                             buildInputs truth_shape]
                                +        return (opDef "EditDistance"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "hypothesis_indices"
                                +  description: "The indices of the hypothesis list SparseTensor.\nThis is an N x R int64 matrix."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "hypothesis_values"
                                +  description: "The values of the hypothesis list SparseTensor.\nThis is an N-length vector."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "hypothesis_shape"
                                +  description: "The shape of the hypothesis list SparseTensor.\nThis is an R-length vector."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "truth_indices"
                                +  description: "The indices of the truth list SparseTensor.\nThis is an M x R int64 matrix."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "truth_values"
                                +  description: "The values of the truth list SparseTensor.\nThis is an M-length vector."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "truth_shape"
                                +  description: "truth indices, vector."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A dense float tensor with rank R - 1.\n\nFor the example input:\n\n    // hypothesis represents a 2x1 matrix with variable-length values:\n    //   (0,0) = [\"a\"]\n    //   (1,0) = [\"b\"]\n    hypothesis_indices = [[0, 0, 0],\n                          [1, 0, 0]]\n    hypothesis_values = [\"a\", \"b\"]\n    hypothesis_shape = [2, 1, 1]\n\n    // truth represents a 2x2 matrix with variable-length values:\n    //   (0,0) = []\n    //   (0,1) = [\"a\"]\n    //   (1,0) = [\"b\", \"c\"]\n    //   (1,1) = [\"a\"]\n    truth_indices = [[0, 1, 0],\n                     [1, 0, 0],\n                     [1, 0, 1],\n                     [1, 1, 0]]\n    truth_values = [\"a\", \"b\", \"c\", \"a\"]\n    truth_shape = [2, 2, 2]\n    normalize = true\n\nThe output will be:\n\n    // output is a 2x2 matrix with edit distances normalized by truth lengths.\n    output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis\n              [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis"
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "normalize"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "boolean (if true, edit distances are normalized by length of truth).\n\nThe output is:"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
                                +--
                                +-- See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
                                +-- ](http://arxiv.org/abs/1511.07289)
                                +elu :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __features__
                                +       -> Tensor Build t -- ^ __activations__
                                +elu = elu' id
                                +elu' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +        OpParams ->
                                +        Tensor v'1 t -- ^ __features__
                                +        -> Tensor Build t -- ^ __activations__
                                +elu' op'options features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features]
                                +        return (opDef "Elu"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "features" type_attr: "T" }
                                +output_arg { name: "activations" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes gradients for the exponential linear (Elu) operation.
                                +
                                +eluGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Elu operation.
                                +           -> Tensor v'2 t -- ^ __outputs__: The outputs of the corresponding Elu operation.
                                +           -> Tensor Build t -- ^ __backprops__: The gradients: `gradients * (outputs + 1)` if outputs < 0,
                                +           -- `gradients` otherwise.
                                +eluGrad = eluGrad' id
                                +eluGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Elu operation.
                                +            -> Tensor v'2 t -- ^ __outputs__: The outputs of the corresponding Elu operation.
                                +            -> Tensor Build t -- ^ __backprops__: The gradients: `gradients * (outputs + 1)` if outputs < 0,
                                +            -- `gradients` otherwise.
                                +eluGrad' op'options gradients outputs | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients,
                                +                                                             buildInputs outputs]
                                +        return (opDef "EluGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "gradients"
                                +  description: "The backpropagated gradients to the corresponding Elu operation."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "outputs"
                                +  description: "The outputs of the corresponding Elu operation."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "backprops"
                                +  description: "The gradients: `gradients * (outputs + 1)` if outputs < 0,\n`gradients` otherwise."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Encode strings into web-safe base64 format.
                                +--
                                +-- Refer to the following article for more information on base64 format:
                                +-- en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
                                +-- end so that the encoded has length multiple of 4. See Padding section of the
                                +-- link above.
                                +-- 
                                +-- Web-safe means that the encoder uses - and _ instead of + and /.
                                +encodeBase64 :: 
                                +                Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Strings to be encoded.
                                +                -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Input strings encoded in base64.
                                +encodeBase64 = encodeBase64' id
                                +encodeBase64' :: OpParams ->
                                +                 Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Strings to be encoded.
                                +                 -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Input strings encoded in base64.
                                +encodeBase64' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "EncodeBase64"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "Strings to be encoded." type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Input strings encoded in base64."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "pad"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Bool whether padding is applied at the ends."
                                +}
                                +-}
                                +
                                +-- | JPEG-encode an image.
                                +--
                                +-- `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
                                +-- 
                                +-- The attr `format` can be used to override the color format of the encoded
                                +-- output.  Values can be:
                                +-- 
                                +-- *   `''`: Use a default format based on the number of channels in the image.
                                +-- *   `grayscale`: Output a grayscale JPEG image.  The `channels` dimension
                                +--     of `image` must be 1.
                                +-- *   `rgb`: Output an RGB JPEG image. The `channels` dimension
                                +--     of `image` must be 3.
                                +-- 
                                +-- If `format` is not specified or is the empty string, a default format is picked
                                +-- in function of the number of channels in `image`:
                                +-- 
                                +-- *   1: Output a grayscale image.
                                +-- *   3: Output an RGB image.
                                +encodeJpeg :: 
                                +              Tensor v'1 Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`.
                                +              -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. JPEG-encoded image.
                                +encodeJpeg = encodeJpeg' id
                                +encodeJpeg' :: OpParams ->
                                +               Tensor v'1 Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`.
                                +               -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. JPEG-encoded image.
                                +encodeJpeg' op'options image | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image]
                                +        return (opDef "EncodeJpeg"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "image"
                                +  description: "3-D with shape `[height, width, channels]`."
                                +  type: DT_UINT8
                                +}
                                +output_arg {
                                +  name: "contents"
                                +  description: "0-D. JPEG-encoded image."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "format"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "Per pixel image format."
                                +  allowed_values { list { s: "" s: "grayscale" s: "rgb" } }
                                +}
                                +attr {
                                +  name: "quality"
                                +  type: "int"
                                +  default_value { i: 95 }
                                +  description: "Quality of the compression from 0 to 100 (higher is better and slower)."
                                +}
                                +attr {
                                +  name: "progressive"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, create a JPEG that loads progressively (coarse to fine)."
                                +}
                                +attr {
                                +  name: "optimize_size"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, spend CPU/RAM to reduce size with no quality change."
                                +}
                                +attr {
                                +  name: "chroma_downsampling"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "See http://en.wikipedia.org/wiki/Chroma_subsampling."
                                +}
                                +attr {
                                +  name: "density_unit"
                                +  type: "string"
                                +  default_value { s: "in" }
                                +  description: "Unit used to specify `x_density` and `y_density`:\npixels per inch (`\'in\'`) or centimeter (`\'cm\'`)."
                                +  allowed_values { list { s: "in" s: "cm" } }
                                +}
                                +attr {
                                +  name: "x_density"
                                +  type: "int"
                                +  default_value { i: 300 }
                                +  description: "Horizontal pixels per density unit."
                                +}
                                +attr {
                                +  name: "y_density"
                                +  type: "int"
                                +  default_value { i: 300 }
                                +  description: "Vertical pixels per density unit."
                                +}
                                +attr {
                                +  name: "xmp_metadata"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If not empty, embed this XMP metadata in the image header."
                                +}
                                +-}
                                +
                                +-- | PNG-encode an image.
                                +--
                                +-- `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
                                +-- where `channels` is:
                                +-- 
                                +-- *   1: for grayscale.
                                +-- *   2: for grayscale + alpha.
                                +-- *   3: for RGB.
                                +-- *   4: for RGBA.
                                +-- 
                                +-- The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
                                +-- default or a value from 0 to 9.  9 is the highest compression level, generating
                                +-- the smallest output, but is slower.
                                +encodePng :: forall v'1 t . (OneOf '[Data.Word.Word16, Data.Word.Word8] t) => 
                                +             Tensor v'1 t -- ^ __image__: 3-D with shape `[height, width, channels]`.
                                +             -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. PNG-encoded image.
                                +encodePng = encodePng' id
                                +encodePng' :: forall v'1 t . (OneOf '[Data.Word.Word16, Data.Word.Word8] t) =>
                                +              OpParams ->
                                +              Tensor v'1 t -- ^ __image__: 3-D with shape `[height, width, channels]`.
                                +              -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. PNG-encoded image.
                                +encodePng' op'options image | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image]
                                +        return (opDef "EncodePng"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "image"
                                +  description: "3-D with shape `[height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "contents"
                                +  description: "0-D. PNG-encoded image."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "compression"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Compression level."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_UINT8 }
                                +  allowed_values { list { type: DT_UINT8 type: DT_UINT16 } }
                                +}
                                +-}
                                +
                                +-- | Encode audio data using the WAV file format.
                                +--
                                +-- This operation will generate a string suitable to be saved out to create a .wav
                                +-- audio file. It will be encoded in the 16-bit PCM format. It takes in float
                                +-- values in the range -1.0f to 1.0f, and any outside that value will be clamped to
                                +-- that range.
                                +-- 
                                +-- `audio` is a 2-D float Tensor of shape `[length, channels]`.
                                +-- `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
                                +encodeWav :: 
                                +             Tensor v'1 Float -- ^ __audio__: 2-D with shape `[length, channels]`.
                                +             -> Tensor v'2 Data.Int.Int32 -- ^ __sample_rate__: Scalar containing the sample frequency.
                                +             -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. WAV-encoded file contents.
                                +encodeWav = encodeWav' id
                                +encodeWav' :: OpParams ->
                                +              Tensor v'1 Float -- ^ __audio__: 2-D with shape `[length, channels]`.
                                +              -> Tensor v'2 Data.Int.Int32 -- ^ __sample_rate__: Scalar containing the sample frequency.
                                +              -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. WAV-encoded file contents.
                                +encodeWav' op'options audio sample_rate | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs audio,
                                +                                                             buildInputs sample_rate]
                                +        return (opDef "EncodeWav"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "audio"
                                +  description: "2-D with shape `[length, channels]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "sample_rate"
                                +  description: "Scalar containing the sample frequency."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "contents"
                                +  description: "0-D. WAV-encoded file contents."
                                +  type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Creates or finds a child frame, and makes `data` available to the child frame.
                                +--
                                +-- This op is used together with `Exit` to create loops in the graph.
                                +-- The unique `frame_name` is used by the `Executor` to identify frames. If
                                +-- `is_constant` is true, `output` is a constant in the child frame; otherwise
                                +-- it may be changed in the child frame. At most `parallel_iterations` iterations
                                +-- are run in parallel in the child frame.
                                +enter :: forall v'1 t . (TensorType t) => 
                                +         Tensor v'1 t -- ^ __data__: The tensor to be made available to the child frame.
                                +         -> Tensor Build t -- ^ __output__: The same tensor as `data`.
                                +enter = enter' id
                                +enter' :: forall v'1 t . (TensorType t) => OpParams ->
                                +          Tensor v'1 t -- ^ __data__: The tensor to be made available to the child frame.
                                +          -> Tensor Build t -- ^ __output__: The same tensor as `data`.
                                +enter' op'options data' | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data']
                                +        return (opDef "Enter"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "data"
                                +  description: "The tensor to be made available to the child frame."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same tensor as `data`."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "frame_name"
                                +  type: "string"
                                +  description: "The name of the child frame."
                                +}
                                +attr {
                                +  name: "is_constant"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, the output is constant within the child frame."
                                +}
                                +attr {
                                +  name: "parallel_iterations"
                                +  type: "int"
                                +  default_value { i: 10 }
                                +  description: "The number of iterations allowed to run in parallel."
                                +}
                                +-}
                                +
                                +-- | Returns the truth value of (x == y) element-wise.
                                +--
                                +-- *NOTE*: `Equal` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +equal :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float), Bool,
                                +                                     Data.ByteString.ByteString, Data.Int.Int16,
                                +                                     Data.Int.Int32, Data.Int.Int64,
                                +                                     Data.Int.Int8, Data.Word.Word16,
                                +                                     Data.Word.Word8, Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor v'2 t -- ^ __y__
                                +         -> Tensor Build Bool -- ^ __z__
                                +equal = equal' id
                                +equal' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float), Bool,
                                +                                      Data.ByteString.ByteString,
                                +                                      Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Int.Int64, Data.Int.Int8,
                                +                                      Data.Word.Word16, Data.Word.Word8, Double,
                                +                                      Float] t) => OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor v'2 t -- ^ __y__
                                +          -> Tensor Build Bool -- ^ __z__
                                +equal' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Equal"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_QUINT8
                                +      type: DT_QINT8
                                +      type: DT_QINT32
                                +      type: DT_STRING
                                +      type: DT_BOOL
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the Gauss error function of `x` element-wise.
                                +
                                +erf :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +erf = erf' id
                                +erf' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +        OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +erf' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Erf"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the complementary error function of `x` element-wise.
                                +
                                +erfc :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +erfc = erfc' id
                                +erfc' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +         OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +erfc' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Erfc"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Exits the current frame to its parent frame.
                                +--
                                +-- Exit makes its input `data` available to the parent frame.
                                +exit :: forall v'1 t . (TensorType t) => 
                                +        Tensor v'1 t -- ^ __data__: The tensor to be made available to the parent frame.
                                +        -> Tensor Build t -- ^ __output__: The same tensor as `data`.
                                +exit = exit' id
                                +exit' :: forall v'1 t . (TensorType t) => OpParams ->
                                +         Tensor v'1 t -- ^ __data__: The tensor to be made available to the parent frame.
                                +         -> Tensor Build t -- ^ __output__: The same tensor as `data`.
                                +exit' op'options data' | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data']
                                +        return (opDef "Exit"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "data"
                                +  description: "The tensor to be made available to the parent frame."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same tensor as `data`."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes exponential of x element-wise.  \\(y = e^x\\).
                                +
                                +exp :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Word.Word16,
                                +                               Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +exp = exp' id
                                +exp' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                Double, Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +exp' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Exp"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Inserts a dimension of 1 into a tensor's shape.
                                +--
                                +-- Given a tensor `input`, this operation inserts a dimension of 1 at the
                                +-- dimension index `dim` of `input`'s shape. The dimension index `dim` starts at
                                +-- zero; if you specify a negative number for `dim` it is counted backward from
                                +-- the end.
                                +-- 
                                +-- This operation is useful if you want to add a batch dimension to a single
                                +-- element. For example, if you have a single image of shape `[height, width,
                                +-- channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
                                +-- which will make the shape `[1, height, width, channels]`.
                                +-- 
                                +-- Other examples:
                                +-- 
                                +-- ```
                                +-- # 't' is a tensor of shape [2]
                                +-- shape(expand_dims(t, 0)) ==> [1, 2]
                                +-- shape(expand_dims(t, 1)) ==> [2, 1]
                                +-- shape(expand_dims(t, -1)) ==> [2, 1]
                                +-- 
                                +-- # 't2' is a tensor of shape [2, 3, 5]
                                +-- shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
                                +-- shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
                                +-- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
                                +-- ```
                                +-- 
                                +-- This operation requires that:
                                +-- 
                                +-- `-1-input.dims() <= dim <= input.dims()`
                                +-- 
                                +-- This operation is related to `squeeze()`, which removes dimensions of
                                +-- size 1.
                                +expandDims :: forall v'1 v'2 t tdim . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] tdim) =>
                                +              
                                +              Tensor v'1 t -- ^ __input__
                                +              -> Tensor v'2 tdim -- ^ __dim__: 0-D (scalar). Specifies the dimension index at which to
                                +                                 -- expand the shape of `input`.
                                +              -> Tensor Build t -- ^ __output__: Contains the same data as `input`, but its shape has an additional
                                +              -- dimension of size 1 added.
                                +expandDims = expandDims' id
                                +expandDims' :: forall v'1 v'2 t tdim . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] tdim) =>
                                +               OpParams ->
                                +               Tensor v'1 t -- ^ __input__
                                +               -> Tensor v'2 tdim -- ^ __dim__: 0-D (scalar). Specifies the dimension index at which to
                                +                                  -- expand the shape of `input`.
                                +               -> Tensor Build t -- ^ __output__: Contains the same data as `input`, but its shape has an additional
                                +               -- dimension of size 1 added.
                                +expandDims' op'options input dim | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs dim]
                                +        return (opDef "ExpandDims"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tdim" .~ tensorType (undefined :: tdim)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg {
                                +  name: "dim"
                                +  description: "0-D (scalar). Specifies the dimension index at which to\nexpand the shape of `input`."
                                +  type_attr: "Tdim"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Contains the same data as `input`, but its shape has an additional\ndimension of size 1 added."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tdim"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes exponential of x - 1 element-wise.
                                +--
                                +-- I.e., \\(y = (\exp x) - 1\\).
                                +expm1 :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +expm1 = expm1' id
                                +expm1' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float),
                                +                                  Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +expm1' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Expm1"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Extracts a glimpse from the input tensor.
                                +--
                                +-- Returns a set of windows called glimpses extracted at location
                                +-- `offsets` from the input tensor. If the windows only partially
                                +-- overlaps the inputs, the non overlapping areas will be filled with
                                +-- random noise.
                                +-- 
                                +-- The result is a 4-D tensor of shape `[batch_size, glimpse_height,
                                +-- glimpse_width, channels]`. The channels and batch dimensions are the
                                +-- same as that of the input tensor. The height and width of the output
                                +-- windows are specified in the `size` parameter.
                                +-- 
                                +-- The argument `normalized` and `centered` controls how the windows are built:
                                +-- 
                                +-- * If the coordinates are normalized but not centered, 0.0 and 1.0
                                +--   correspond to the minimum and maximum of each height and width
                                +--   dimension.
                                +-- * If the coordinates are both normalized and centered, they range from
                                +--   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
                                +--   left corner, the lower right corner is located at (1.0, 1.0) and the
                                +--   center is at (0, 0).
                                +-- * If the coordinates are not normalized they are interpreted as
                                +--   numbers of pixels.
                                +extractGlimpse :: 
                                +                  Tensor v'1 Float -- ^ __input__: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
                                +                  -> Tensor v'2 Data.Int.Int32 -- ^ __size__: A 1-D tensor of 2 elements containing the size of the glimpses
                                +                                               -- to extract.  The glimpse height must be specified first, following
                                +                                               -- by the glimpse width.
                                +                  -> Tensor v'3 Float -- ^ __offsets__: A 2-D integer tensor of shape `[batch_size, 2]` containing
                                +                                      -- the y, x locations of the center of each window.
                                +                  -> Tensor Build Float -- ^ __glimpse__: A tensor representing the glimpses `[batch_size,
                                +                  -- glimpse_height, glimpse_width, channels]`.
                                +extractGlimpse = extractGlimpse' id
                                +extractGlimpse' :: OpParams ->
                                +                   Tensor v'1 Float -- ^ __input__: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __size__: A 1-D tensor of 2 elements containing the size of the glimpses
                                +                                                -- to extract.  The glimpse height must be specified first, following
                                +                                                -- by the glimpse width.
                                +                   -> Tensor v'3 Float -- ^ __offsets__: A 2-D integer tensor of shape `[batch_size, 2]` containing
                                +                                       -- the y, x locations of the center of each window.
                                +                   -> Tensor Build Float -- ^ __glimpse__: A tensor representing the glimpses `[batch_size,
                                +                   -- glimpse_height, glimpse_width, channels]`.
                                +extractGlimpse' op'options input size offsets | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs size,
                                +                                                             buildInputs offsets]
                                +        return (opDef "ExtractGlimpse"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "A 4-D float tensor of shape `[batch_size, height, width, channels]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract.  The glimpse height must be specified first, following\nby the glimpse width."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "offsets"
                                +  description: "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe y, x locations of the center of each window."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "glimpse"
                                +  description: "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "centered"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images."
                                +}
                                +attr {
                                +  name: "normalized"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "indicates if the offset coordinates are normalized."
                                +}
                                +attr {
                                +  name: "uniform_noise"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution."
                                +}
                                +-}
                                +
                                +-- | Extract `patches` from `images` and put them in the "depth" output dimension.
                                +
                                +extractImagePatches :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Int.Int64, Data.Int.Int8,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8, Double,
                                +                                               Float] t) => 
                                +                       Tensor v'1 t -- ^ __images__: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
                                +                       -> Tensor Build t -- ^ __patches__: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
                                +                       -- ksize_cols * depth]` containing image patches with size
                                +                       -- `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
                                +                       -- `out_rows` and `out_cols` are the dimensions of the output patches.
                                +extractImagePatches = extractImagePatches' id
                                +extractImagePatches' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                                Data.Int.Int64, Data.Int.Int8,
                                +                                                Data.Word.Word16,
                                +                                                Data.Word.Word8, Double,
                                +                                                Float] t) => OpParams ->
                                +                        Tensor v'1 t -- ^ __images__: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
                                +                        -> Tensor Build t -- ^ __patches__: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
                                +                        -- ksize_cols * depth]` containing image patches with size
                                +                        -- `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
                                +                        -- `out_rows` and `out_cols` are the dimensions of the output patches.
                                +extractImagePatches' op'options images | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images]
                                +        return (opDef "ExtractImagePatches"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "4-D Tensor with shape `[batch, in_rows, in_cols, depth]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "patches"
                                +  description: "4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *\nksize_cols * depth]` containing image patches with size\n`ksize_rows x ksize_cols x depth` vectorized in the \"depth\" dimension. Note\n`out_rows` and `out_cols` are the dimensions of the output patches."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksizes"
                                +  type: "list(int)"
                                +  description: "The size of the sliding window for each dimension of `images`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D of length 4. How far the centers of two consecutive patches are in\nthe images. Must be: `[1, stride_rows, stride_cols, 1]`."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "rates"
                                +  type: "list(int)"
                                +  description: "1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the\ninput stride, specifying how far two consecutive patch samples are in the\ninput. Equivalent to extracting patches with\n`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by\nsubsampling them spatially by a factor of `rates`. This is equivalent to\n`rate` in dilated (a.k.a. Atrous) convolutions."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use.\n\nWe specify the size-related attributes as:\n\n```python\n      ksizes = [1, ksize_rows, ksize_cols, 1]\n      strides = [1, strides_rows, strides_cols, 1]\n      rates = [1, rates_rows, rates_cols, 1]\n```"
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Fast Fourier transform.
                                +--
                                +-- Computes the 1-dimensional discrete Fourier transform over the inner-most
                                +-- dimension of `input`.
                                +fFT :: 
                                +       Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +       -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most
                                +       --   dimension of `input` is replaced with its 1D Fourier transform.
                                +       -- 
                                +       -- @compatibility(numpy)
                                +       -- Equivalent to np.fft.fft
                                +       -- @end_compatibility
                                +fFT = fFT' id
                                +fFT' :: OpParams ->
                                +        Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +        -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most
                                +        --   dimension of `input` is replaced with its 1D Fourier transform.
                                +        -- 
                                +        -- @compatibility(numpy)
                                +        -- Equivalent to np.fft.fft
                                +        -- @end_compatibility
                                +fFT' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "FFT"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same shape as `input`. The inner-most\n  dimension of `input` is replaced with its 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | 2D fast Fourier transform.
                                +--
                                +-- Computes the 2-dimensional discrete Fourier transform over the inner-most
                                +-- 2 dimensions of `input`.
                                +fFT2D :: 
                                +         Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +         -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2
                                +         --   dimensions of `input` are replaced with their 2D Fourier transform.
                                +         -- 
                                +         -- @compatibility(numpy)
                                +         -- Equivalent to np.fft.fft2
                                +         -- @end_compatibility
                                +fFT2D = fFT2D' id
                                +fFT2D' :: OpParams ->
                                +          Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +          -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2
                                +          --   dimensions of `input` are replaced with their 2D Fourier transform.
                                +          -- 
                                +          -- @compatibility(numpy)
                                +          -- Equivalent to np.fft.fft2
                                +          -- @end_compatibility
                                +fFT2D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "FFT2D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n  dimensions of `input` are replaced with their 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft2\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | 3D fast Fourier transform.
                                +--
                                +-- Computes the 3-dimensional discrete Fourier transform over the inner-most 3
                                +-- dimensions of `input`.
                                +fFT3D :: 
                                +         Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +         -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3
                                +         --   dimensions of `input` are replaced with their 3D Fourier transform.
                                +         -- 
                                +         -- @compatibility(numpy)
                                +         -- Equivalent to np.fft.fftn with 3 dimensions.
                                +         -- @end_compatibility
                                +fFT3D = fFT3D' id
                                +fFT3D' :: OpParams ->
                                +          Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +          -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3
                                +          --   dimensions of `input` are replaced with their 3D Fourier transform.
                                +          -- 
                                +          -- @compatibility(numpy)
                                +          -- Equivalent to np.fft.fftn with 3 dimensions.
                                +          -- @end_compatibility
                                +fFT3D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "FFT3D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n  dimensions of `input` are replaced with their 3D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fftn with 3 dimensions.\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | A queue that produces elements in first-in first-out order.
                                +
                                +fIFOQueue :: forall m' . (MonadBuild m') => 
                                +             [DataType] -- ^ __component_types__: The type of each component in a value.
                                +             -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
                                +fIFOQueue = fIFOQueue' id
                                +fIFOQueue' :: forall m' . (MonadBuild m') => OpParams ->
                                +              [DataType] -- ^ __component_types__: The type of each component in a value.
                                +              -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
                                +fIFOQueue' op'options component_types | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "FIFOQueue"
                                +                    & opAttr "component_types" .~ component_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | A queue that produces elements in first-in first-out order.
                                +
                                +fIFOQueueV2 :: forall m' . (MonadBuild m') => 
                                +               [DataType] -- ^ __component_types__: The type of each component in a value.
                                +               -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue.
                                +fIFOQueueV2 = fIFOQueueV2' id
                                +fIFOQueueV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue.
                                +fIFOQueueV2' op'options component_types | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "FIFOQueueV2"
                                +                    & opAttr "component_types" .~ component_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the queue."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | Output a fact about factorials.
                                +
                                +fact :: 
                                +        Tensor Build Data.ByteString.ByteString -- ^ __fact__
                                +fact = fact' id
                                +fact' :: OpParams ->
                                +         Tensor Build Data.ByteString.ByteString -- ^ __fact__
                                +fact' op'options | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        return (opDef "Fact"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "fact" type: DT_STRING }
                                +-}
                                +
                                +-- | Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
                                +--
                                +-- Attributes `[min; max]` define the clamping range for the `inputs` data.
                                +-- `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
                                +-- when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
                                +-- then de-quantized and output as floats in `[min; max]` interval.
                                +-- `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
                                +-- 
                                +-- Quantization is called fake since the output is still in floating point.
                                +fakeQuantWithMinMaxArgs :: 
                                +                           Tensor v'1 Float -- ^ __inputs__
                                +                           -> Tensor Build Float -- ^ __outputs__
                                +fakeQuantWithMinMaxArgs = fakeQuantWithMinMaxArgs' id
                                +fakeQuantWithMinMaxArgs' :: OpParams ->
                                +                            Tensor v'1 Float -- ^ __inputs__
                                +                            -> Tensor Build Float -- ^ __outputs__
                                +fakeQuantWithMinMaxArgs' op'options inputs | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
                                +        return (opDef "FakeQuantWithMinMaxArgs"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "inputs" type: DT_FLOAT }
                                +output_arg { name: "outputs" type: DT_FLOAT }
                                +attr { name: "min" type: "float" default_value { f: -6.0 } }
                                +attr { name: "max" type: "float" default_value { f: 6.0 } }
                                +attr { name: "num_bits" type: "int" default_value { i: 8 } }
                                +attr {
                                +  name: "narrow_range" type: "bool" default_value { b: false }
                                +}
                                +-}
                                +
                                +-- | Compute gradients for a FakeQuantWithMinMaxArgs operation.
                                +
                                +fakeQuantWithMinMaxArgsGradient :: 
                                +                                   Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
                                +                                   -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
                                +                                   -> Tensor Build Float -- ^ __backprops__: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
                                +                                   -- `gradients * (inputs >= min && inputs <= max)`.
                                +fakeQuantWithMinMaxArgsGradient = fakeQuantWithMinMaxArgsGradient' id
                                +fakeQuantWithMinMaxArgsGradient' :: OpParams ->
                                +                                    Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
                                +                                    -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
                                +                                    -> Tensor Build Float -- ^ __backprops__: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
                                +                                    -- `gradients * (inputs >= min && inputs <= max)`.
                                +fakeQuantWithMinMaxArgsGradient' op'options gradients
                                +                                 inputs | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients,
                                +                                                             buildInputs inputs]
                                +        return (opDef "FakeQuantWithMinMaxArgsGradient"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "gradients"
                                +  description: "Backpropagated gradients above the FakeQuantWithMinMaxArgs operation."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "inputs"
                                +  description: "Values passed as inputs to the FakeQuantWithMinMaxArgs operation."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "backprops"
                                +  description: "Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:\n`gradients * (inputs >= min && inputs <= max)`."
                                +  type: DT_FLOAT
                                +}
                                +attr { name: "min" type: "float" default_value { f: -6.0 } }
                                +attr { name: "max" type: "float" default_value { f: 6.0 } }
                                +attr { name: "num_bits" type: "int" default_value { i: 8 } }
                                +attr {
                                +  name: "narrow_range" type: "bool" default_value { b: false }
                                +}
                                +-}
                                +
                                +-- | Fake-quantize the 'inputs' tensor of type float via global float scalars `min`
                                +--
                                +-- and `max` to 'outputs' tensor of same shape as `inputs`.
                                +-- 
                                +-- `[min; max]` define the clamping range for the `inputs` data.
                                +-- `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
                                +-- when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
                                +-- then de-quantized and output as floats in `[min; max]` interval.
                                +-- `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
                                +-- 
                                +-- This operation has a gradient and thus allows for training `min` and `max`
                                +-- values.
                                +fakeQuantWithMinMaxVars :: 
                                +                           Tensor v'1 Float -- ^ __inputs__
                                +                           -> Tensor v'2 Float -- ^ __min__
                                +                           -> Tensor v'3 Float -- ^ __max__
                                +                           -> Tensor Build Float -- ^ __outputs__
                                +fakeQuantWithMinMaxVars = fakeQuantWithMinMaxVars' id
                                +fakeQuantWithMinMaxVars' :: OpParams ->
                                +                            Tensor v'1 Float -- ^ __inputs__
                                +                            -> Tensor v'2 Float -- ^ __min__
                                +                            -> Tensor v'3 Float -- ^ __max__
                                +                            -> Tensor Build Float -- ^ __outputs__
                                +fakeQuantWithMinMaxVars' op'options inputs min max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs,
                                +                                                             buildInputs min,
                                +                                                             buildInputs max]
                                +        return (opDef "FakeQuantWithMinMaxVars"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "inputs" type: DT_FLOAT }
                                +input_arg { name: "min" type: DT_FLOAT }
                                +input_arg { name: "max" type: DT_FLOAT }
                                +output_arg { name: "outputs" type: DT_FLOAT }
                                +attr { name: "num_bits" type: "int" default_value { i: 8 } }
                                +attr {
                                +  name: "narrow_range" type: "bool" default_value { b: false }
                                +}
                                +-}
                                +
                                +-- | Compute gradients for a FakeQuantWithMinMaxVars operation.
                                +
                                +fakeQuantWithMinMaxVarsGradient :: 
                                +                                   Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
                                +                                   -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
                                +                                                       -- min, max: Quantization interval, scalar floats.
                                +                                   -> Tensor v'3 Float -- ^ __min__
                                +                                   -> Tensor v'4 Float -- ^ __max__
                                +                                   -> (Tensor Build Float, Tensor Build Float,
                                +                                       Tensor Build Float)
                                +                                   -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__)
                                +                                   --
                                +                                   -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs:
                                +                                   -- `gradients * (inputs >= min && inputs <= max)`.
                                +                                   --
                                +                                   -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter:
                                +                                   -- `sum(gradients * (inputs < min))`.
                                +                                   --
                                +                                   -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter:
                                +                                   -- `sum(gradients * (inputs > max))`.
                                +fakeQuantWithMinMaxVarsGradient = fakeQuantWithMinMaxVarsGradient' id
                                +fakeQuantWithMinMaxVarsGradient' :: OpParams ->
                                +                                    Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
                                +                                    -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
                                +                                                        -- min, max: Quantization interval, scalar floats.
                                +                                    -> Tensor v'3 Float -- ^ __min__
                                +                                    -> Tensor v'4 Float -- ^ __max__
                                +                                    -> (Tensor Build Float, Tensor Build Float,
                                +                                        Tensor Build Float)
                                +                                    -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__)
                                +                                    --
                                +                                    -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs:
                                +                                    -- `gradients * (inputs >= min && inputs <= max)`.
                                +                                    --
                                +                                    -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter:
                                +                                    -- `sum(gradients * (inputs < min))`.
                                +                                    --
                                +                                    -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter:
                                +                                    -- `sum(gradients * (inputs > max))`.
                                +fakeQuantWithMinMaxVarsGradient' op'options gradients inputs min
                                +                                 max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients,
                                +                                                             buildInputs inputs,
                                +                                                             buildInputs min,
                                +                                                             buildInputs max]
                                +        return (opDef "FakeQuantWithMinMaxVarsGradient"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "gradients"
                                +  description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "inputs"
                                +  description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation.\nmin, max: Quantization interval, scalar floats."
                                +  type: DT_FLOAT
                                +}
                                +input_arg { name: "min" type: DT_FLOAT }
                                +input_arg { name: "max" type: DT_FLOAT }
                                +output_arg {
                                +  name: "backprops_wrt_input"
                                +  description: "Backpropagated gradients w.r.t. inputs:\n`gradients * (inputs >= min && inputs <= max)`."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "backprop_wrt_min"
                                +  description: "Backpropagated gradients w.r.t. min parameter:\n`sum(gradients * (inputs < min))`."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "backprop_wrt_max"
                                +  description: "Backpropagated gradients w.r.t. max parameter:\n`sum(gradients * (inputs > max))`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_bits"
                                +  type: "int"
                                +  default_value { i: 8 }
                                +  description: "The bitwidth of the quantization; between 2 and 8, inclusive."
                                +}
                                +attr {
                                +  name: "narrow_range"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Whether to quantize into 2^num_bits - 1 distinct values."
                                +}
                                +-}
                                +
                                +-- | Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,
                                +--
                                +-- `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
                                +-- to 'outputs' tensor of same shape as `inputs`.
                                +-- 
                                +-- `[min; max]` define the clamping range for the `inputs` data.
                                +-- `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
                                +-- when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
                                +-- then de-quantized and output as floats in `[min; max]` interval.
                                +-- `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
                                +-- 
                                +-- This operation has a gradient and thus allows for training `min` and `max`
                                +-- values.
                                +fakeQuantWithMinMaxVarsPerChannel :: 
                                +                                     Tensor v'1 Float -- ^ __inputs__
                                +                                     -> Tensor v'2 Float -- ^ __min__
                                +                                     -> Tensor v'3 Float -- ^ __max__
                                +                                     -> Tensor Build Float -- ^ __outputs__
                                +fakeQuantWithMinMaxVarsPerChannel = fakeQuantWithMinMaxVarsPerChannel' id
                                +fakeQuantWithMinMaxVarsPerChannel' :: OpParams ->
                                +                                      Tensor v'1 Float -- ^ __inputs__
                                +                                      -> Tensor v'2 Float -- ^ __min__
                                +                                      -> Tensor v'3 Float -- ^ __max__
                                +                                      -> Tensor Build Float -- ^ __outputs__
                                +fakeQuantWithMinMaxVarsPerChannel' op'options inputs min
                                +                                   max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs,
                                +                                                             buildInputs min,
                                +                                                             buildInputs max]
                                +        return (opDef "FakeQuantWithMinMaxVarsPerChannel"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "inputs" type: DT_FLOAT }
                                +input_arg { name: "min" type: DT_FLOAT }
                                +input_arg { name: "max" type: DT_FLOAT }
                                +output_arg { name: "outputs" type: DT_FLOAT }
                                +attr { name: "num_bits" type: "int" default_value { i: 8 } }
                                +attr {
                                +  name: "narrow_range" type: "bool" default_value { b: false }
                                +}
                                +-}
                                +
                                +-- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
                                +
                                +fakeQuantWithMinMaxVarsPerChannelGradient :: 
                                +                                             Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
                                +                                                              -- shape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`.
                                +                                             -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
                                +                                                                 --   same as `gradients`.
                                +                                                                 -- min, max: Quantization interval, floats of shape `[d]`.
                                +                                             -> Tensor v'3 Float -- ^ __min__
                                +                                             -> Tensor v'4 Float -- ^ __max__
                                +                                             -> (Tensor Build Float,
                                +                                                 Tensor Build Float,
                                +                                                 Tensor Build Float)
                                +                                             -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__)
                                +                                             --
                                +                                             -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs, shape same as
                                +                                             -- `inputs`:
                                +                                             --   `gradients * (inputs >= min && inputs <= max)`.
                                +                                             --
                                +                                             -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter, shape `[d]`:
                                +                                             -- `sum_per_d(gradients * (inputs < min))`.
                                +                                             --
                                +                                             -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter, shape `[d]`:
                                +                                             -- `sum_per_d(gradients * (inputs > max))`.
                                +fakeQuantWithMinMaxVarsPerChannelGradient = fakeQuantWithMinMaxVarsPerChannelGradient' id
                                +fakeQuantWithMinMaxVarsPerChannelGradient' :: OpParams ->
                                +                                              Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
                                +                                                               -- shape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`.
                                +                                              -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
                                +                                                                  --   same as `gradients`.
                                +                                                                  -- min, max: Quantization interval, floats of shape `[d]`.
                                +                                              -> Tensor v'3 Float -- ^ __min__
                                +                                              -> Tensor v'4 Float -- ^ __max__
                                +                                              -> (Tensor Build Float,
                                +                                                  Tensor Build Float,
                                +                                                  Tensor Build Float)
                                +                                              -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__)
                                +                                              --
                                +                                              -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs, shape same as
                                +                                              -- `inputs`:
                                +                                              --   `gradients * (inputs >= min && inputs <= max)`.
                                +                                              --
                                +                                              -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter, shape `[d]`:
                                +                                              -- `sum_per_d(gradients * (inputs < min))`.
                                +                                              --
                                +                                              -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter, shape `[d]`:
                                +                                              -- `sum_per_d(gradients * (inputs > max))`.
                                +fakeQuantWithMinMaxVarsPerChannelGradient' op'options gradients inputs min
                                +                                           max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients,
                                +                                                             buildInputs inputs,
                                +                                                             buildInputs min,
                                +                                                             buildInputs max]
                                +        return (opDef "FakeQuantWithMinMaxVarsPerChannelGradient"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "gradients"
                                +  description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation,\nshape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "inputs"
                                +  description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape\n  same as `gradients`.\nmin, max: Quantization interval, floats of shape `[d]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg { name: "min" type: DT_FLOAT }
                                +input_arg { name: "max" type: DT_FLOAT }
                                +output_arg {
                                +  name: "backprops_wrt_input"
                                +  description: "Backpropagated gradients w.r.t. inputs, shape same as\n`inputs`:\n  `gradients * (inputs >= min && inputs <= max)`."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "backprop_wrt_min"
                                +  description: "Backpropagated gradients w.r.t. min parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs < min))`."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "backprop_wrt_max"
                                +  description: "Backpropagated gradients w.r.t. max parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs > max))`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_bits"
                                +  type: "int"
                                +  default_value { i: 8 }
                                +  description: "The bitwidth of the quantization; between 2 and 8, inclusive."
                                +}
                                +attr {
                                +  name: "narrow_range"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Whether to quantize into 2^num_bits - 1 distinct values."
                                +}
                                +-}
                                +
                                +-- | Deprecated. Do not use.
                                +
                                +fakeQueue :: forall v'1 m' . (MonadBuild m') => 
                                +             Tensor v'1 ResourceHandle -- ^ __resource__
                                +             -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__
                                +fakeQueue = fakeQueue' id
                                +fakeQueue' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +              Tensor v'1 ResourceHandle -- ^ __resource__
                                +              -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__
                                +fakeQueue' op'options resource | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource]
                                +        buildOp [] (opDef "FakeQueue"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "resource" type: DT_RESOURCE }
                                +output_arg { name: "handle" type: DT_STRING is_ref: true }
                                +-}
                                +
                                +-- | Creates a tensor filled with a scalar value.
                                +--
                                +-- This operation creates a tensor of shape `dims` and fills it with `value`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # Output tensor has shape [2, 3].
                                +-- fill([2, 3], 9) ==> [[9, 9, 9]
                                +--                      [9, 9, 9]]
                                +-- ```
                                +fill :: forall v'1 v'2 t . (TensorType t) => 
                                +        Tensor v'1 Data.Int.Int32 -- ^ __dims__: 1-D. Represents the shape of the output tensor.
                                +        -> Tensor v'2 t -- ^ __value__: 0-D (scalar). Value to fill the returned tensor.
                                +                        -- 
                                +                        -- @compatibility(numpy)
                                +                        -- Equivalent to np.full
                                +                        -- @end_compatibility
                                +        -> Tensor Build t -- ^ __output__
                                +fill = fill' id
                                +fill' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +         Tensor v'1 Data.Int.Int32 -- ^ __dims__: 1-D. Represents the shape of the output tensor.
                                +         -> Tensor v'2 t -- ^ __value__: 0-D (scalar). Value to fill the returned tensor.
                                +                         -- 
                                +                         -- @compatibility(numpy)
                                +                         -- Equivalent to np.full
                                +                         -- @end_compatibility
                                +         -> Tensor Build t -- ^ __output__
                                +fill' op'options dims value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dims,
                                +                                                             buildInputs value]
                                +        return (opDef "Fill"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "dims"
                                +  description: "1-D. Represents the shape of the output tensor."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "0-D (scalar). Value to fill the returned tensor.\n\n@compatibility(numpy)\nEquivalent to np.full\n@end_compatibility"
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Creates a dataset that emits the records from one or more binary files.
                                +
                                +fixedLengthRecordDataset :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => 
                                +                            Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__: A scalar or a vector containing the name(s) of the file(s) to be
                                +                                                                  -- read.
                                +                            -> Tensor v'2 Data.Int.Int64 -- ^ __header_bytes__: A scalar representing the number of bytes to skip at the
                                +                                                         -- beginning of a file.
                                +                            -> Tensor v'3 Data.Int.Int64 -- ^ __record_bytes__: A scalar representing the number of bytes in each record.
                                +                            -> Tensor v'4 Data.Int.Int64 -- ^ __footer_bytes__: A scalar representing the number of bytes to skip at the end
                                +                                                         -- of a file.
                                +                            -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +fixedLengthRecordDataset = fixedLengthRecordDataset' id
                                +fixedLengthRecordDataset' :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') =>
                                +                             OpParams ->
                                +                             Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__: A scalar or a vector containing the name(s) of the file(s) to be
                                +                                                                   -- read.
                                +                             -> Tensor v'2 Data.Int.Int64 -- ^ __header_bytes__: A scalar representing the number of bytes to skip at the
                                +                                                          -- beginning of a file.
                                +                             -> Tensor v'3 Data.Int.Int64 -- ^ __record_bytes__: A scalar representing the number of bytes in each record.
                                +                             -> Tensor v'4 Data.Int.Int64 -- ^ __footer_bytes__: A scalar representing the number of bytes to skip at the end
                                +                                                          -- of a file.
                                +                             -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +fixedLengthRecordDataset' op'options filenames header_bytes record_bytes
                                +                          footer_bytes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames,
                                +                                                             buildInputs header_bytes,
                                +                                                             buildInputs record_bytes,
                                +                                                             buildInputs footer_bytes]
                                +        buildOp [] (opDef "FixedLengthRecordDataset"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "filenames"
                                +  description: "A scalar or a vector containing the name(s) of the file(s) to be\nread."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "header_bytes"
                                +  description: "A scalar representing the number of bytes to skip at the\nbeginning of a file."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "record_bytes"
                                +  description: "A scalar representing the number of bytes in each record."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "footer_bytes"
                                +  description: "A scalar representing the number of bytes to skip at the end\nof a file."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +-}
                                +
                                +-- | A Reader that outputs fixed-length records from a file.
                                +
                                +fixedLengthRecordReader :: forall m' . (MonadBuild m') => 
                                +                           Data.Int.Int64 -- ^ __record_bytes__: Number of bytes in the record.
                                +                           -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +fixedLengthRecordReader = fixedLengthRecordReader' id
                                +fixedLengthRecordReader' :: forall m' . (MonadBuild m') => OpParams ->
                                +                            Data.Int.Int64 -- ^ __record_bytes__: Number of bytes in the record.
                                +                            -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +fixedLengthRecordReader' op'options record_bytes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "FixedLengthRecordReader"
                                +                    & opAttr "record_bytes" .~ record_bytes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "header_bytes"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of bytes in the header, defaults to 0."
                                +}
                                +attr {
                                +  name: "record_bytes"
                                +  type: "int"
                                +  description: "Number of bytes in the record."
                                +}
                                +attr {
                                +  name: "footer_bytes"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of bytes in the footer, defaults to 0."
                                +}
                                +attr {
                                +  name: "hop_bytes"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of bytes to hop before each read. Default of 0 means using\nrecord_bytes."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | A Reader that outputs fixed-length records from a file.
                                +
                                +fixedLengthRecordReaderV2 :: forall m' . (MonadBuild m') => 
                                +                             Data.Int.Int64 -- ^ __record_bytes__: Number of bytes in the record.
                                +                             -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +fixedLengthRecordReaderV2 = fixedLengthRecordReaderV2' id
                                +fixedLengthRecordReaderV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                              Data.Int.Int64 -- ^ __record_bytes__: Number of bytes in the record.
                                +                              -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +fixedLengthRecordReaderV2' op'options record_bytes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "FixedLengthRecordReaderV2"
                                +                    & opAttr "record_bytes" .~ record_bytes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "header_bytes"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of bytes in the header, defaults to 0."
                                +}
                                +attr {
                                +  name: "record_bytes"
                                +  type: "int"
                                +  description: "Number of bytes in the record."
                                +}
                                +attr {
                                +  name: "footer_bytes"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of bytes in the footer, defaults to 0."
                                +}
                                +attr {
                                +  name: "hop_bytes"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of bytes to hop before each read. Default of 0 means using\nrecord_bytes."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +attr {
                                +  name: "encoding"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The type of encoding for the file. Currently ZLIB and GZIP\nare supported. Defaults to none."
                                +}
                                +-}
                                +
                                +-- | Generates labels for candidate sampling with a learned unigram distribution.
                                +--
                                +-- A unigram sampler could use a fixed unigram distribution read from a
                                +-- file or passed in as an in-memory array instead of building up the distribution
                                +-- from data on the fly. There is also an option to skew the distribution by
                                +-- applying a distortion power to the weights.
                                +-- 
                                +-- The vocabulary file should be in CSV-like format, with the last field
                                +-- being the weight associated with the word.
                                +-- 
                                +-- For each batch, this op picks a single set of sampled candidate labels.
                                +-- 
                                +-- The advantages of sampling candidates per-batch are simplicity and the
                                +-- possibility of efficient dense matrix multiplication. The disadvantage is that
                                +-- the sampled candidates must be chosen independently of the context and of the
                                +-- true labels.
                                +fixedUnigramCandidateSampler :: forall v'1 m' . (MonadBuild m') => 
                                +                                Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                                -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                                -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                                -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                        -- candidates in a batch are unique. This requires some approximation to
                                +                                        -- estimate the post-rejection sampling probabilities.
                                +                                -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                             -- IDs of the num_true target_classes in the corresponding original label.
                                +                                -> m' ((Tensor Value Data.Int.Int64,
                                +                                        Tensor Value Float, Tensor Value Float))
                                +                                -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                                --
                                +                                -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                                -- the ID of a sampled candidate.
                                +                                --
                                +                                -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                                -- the number of times each candidate is expected to occur in a batch
                                +                                -- of sampled candidates. If unique=true, then this is a probability.
                                +                                --
                                +                                -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                                -- candidate representing the number of times the candidate is expected
                                +                                -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                                -- probability.
                                +fixedUnigramCandidateSampler = fixedUnigramCandidateSampler' id
                                +fixedUnigramCandidateSampler' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                                 Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                                 -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                                 -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                                 -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                         -- candidates in a batch are unique. This requires some approximation to
                                +                                         -- estimate the post-rejection sampling probabilities.
                                +                                 -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                              -- IDs of the num_true target_classes in the corresponding original label.
                                +                                 -> m' ((Tensor Value Data.Int.Int64,
                                +                                         Tensor Value Float,
                                +                                         Tensor Value Float))
                                +                                 -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                                 --
                                +                                 -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                                 -- the ID of a sampled candidate.
                                +                                 --
                                +                                 -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                                 -- the number of times each candidate is expected to occur in a batch
                                +                                 -- of sampled candidates. If unique=true, then this is a probability.
                                +                                 --
                                +                                 -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                                 -- candidate representing the number of times the candidate is expected
                                +                                 -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                                 -- probability.
                                +fixedUnigramCandidateSampler' op'options num_sampled num_true range_max unique
                                +                              true_classes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes]
                                +        buildOp [] (opDef "FixedUnigramCandidateSampler"
                                +                    & opAttr "num_sampled" .~ num_sampled
                                +                    & opAttr "num_true" .~ num_true
                                +                    & opAttr "range_max" .~ range_max
                                +                    & opAttr "unique" .~ unique
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "true_classes"
                                +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sampled_candidates"
                                +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "true_expected_count"
                                +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "sampled_expected_count"
                                +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_true"
                                +  type: "int"
                                +  description: "Number of true labels per context."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "num_sampled"
                                +  type: "int"
                                +  description: "Number of candidates to randomly sample."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "unique"
                                +  type: "bool"
                                +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
                                +}
                                +attr {
                                +  name: "range_max"
                                +  type: "int"
                                +  description: "The sampler will sample integers from the interval [0, range_max)."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "vocab_file"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "Each valid line in this file (which should have a CSV-like format)\ncorresponds to a valid word ID. IDs are in sequential order, starting from\nnum_reserved_ids. The last entry in each line is expected to be a value\ncorresponding to the count or relative probability. Exactly one of vocab_file\nand unigrams needs to be passed to this op."
                                +}
                                +attr {
                                +  name: "distortion"
                                +  type: "float"
                                +  default_value { f: 1.0 }
                                +  description: "The distortion is used to skew the unigram probability distribution.\nEach weight is first raised to the distortion\'s power before adding to the\ninternal unigram distribution. As a result, distortion = 1.0 gives regular\nunigram sampling (as defined by the vocab file), and distortion = 0.0 gives\na uniform distribution."
                                +}
                                +attr {
                                +  name: "num_reserved_ids"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Optionally some reserved IDs can be added in the range [0,\n..., num_reserved_ids) by the users. One use case is that a special unknown\nword token is used as ID 0. These IDs will have a sampling probability of 0."
                                +}
                                +attr {
                                +  name: "num_shards"
                                +  type: "int"
                                +  default_value { i: 1 }
                                +  description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'shard\') indicates the number of partitions that are being\nused in the overall computation."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "shard"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'num_shards\') indicates the particular partition number of a\nsampler op, when partitioning is being used."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "unigrams"
                                +  type: "list(float)"
                                +  default_value { list { } }
                                +  description: "A list of unigram counts or probabilities, one per ID in sequential\norder. Exactly one of vocab_file and unigrams should be passed to this op."
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +-}
                                +
                                +-- | Returns element-wise largest integer not greater than x.
                                +
                                +floor :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +floor = floor' id
                                +floor' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +floor' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Floor"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns x // y element-wise.
                                +--
                                +-- *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +floorDiv :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build t -- ^ __z__
                                +floorDiv = floorDiv' id
                                +floorDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 t -- ^ __y__
                                +             -> Tensor Build t -- ^ __z__
                                +floorDiv' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "FloorDiv"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
                                +--
                                +-- true, this follows Python semantics in that the result here is consistent
                                +-- with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
                                +-- 
                                +-- *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +floorMod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double,
                                +                                        Float] t) => 
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build t -- ^ __z__
                                +floorMod = floorMod' id
                                +floorMod' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double,
                                +                                         Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 t -- ^ __y__
                                +             -> Tensor Build t -- ^ __z__
                                +floorMod' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "FloorMod"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Performs fractional average pooling on the input.
                                +--
                                +-- Fractional average pooling is similar to Fractional max pooling in the pooling
                                +-- region generation step. The only difference is that after pooling regions are
                                +-- generated, a mean operation is performed instead of a max operation in each
                                +-- pooling region.
                                +fractionalAvgPool :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                             Double, Float] t) => 
                                +                     Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
                                +                     -> (Tensor Build t, Tensor Build Data.Int.Int64,
                                +                         Tensor Build Data.Int.Int64)
                                +                     -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)
                                +                     --
                                +                     -- * __output__: output tensor after fractional avg pooling.
                                +                     --
                                +                     -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.
                                +                     --
                                +                     -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.
                                +fractionalAvgPool = fractionalAvgPool' id
                                +fractionalAvgPool' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                              Double, Float] t) => OpParams ->
                                +                      Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
                                +                      -> (Tensor Build t, Tensor Build Data.Int.Int64,
                                +                          Tensor Build Data.Int.Int64)
                                +                      -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)
                                +                      --
                                +                      -- * __output__: output tensor after fractional avg pooling.
                                +                      --
                                +                      -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.
                                +                      --
                                +                      -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.
                                +fractionalAvgPool' op'options value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value]
                                +        return (opDef "FractionalAvgPool"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "output tensor after fractional avg pooling."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "row_pooling_sequence"
                                +  description: "row pooling sequence, needed to calculate gradient."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "col_pooling_sequence"
                                +  description: "column pooling sequence, needed to calculate gradient."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "pooling_ratio"
                                +  type: "list(float)"
                                +  description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "pseudo_random"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random."
                                +}
                                +attr {
                                +  name: "overlapping"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling."
                                +}
                                +attr {
                                +  name: "deterministic"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalAvgPool node in the computation graph. Mainly used\nin unit test to make FractionalAvgPool deterministic."
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes gradient of the FractionalAvgPool function.
                                +--
                                +-- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
                                +-- FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
                                +-- out_backprop to those indices that form the same pooling cell. Therefore, we
                                +-- just need to know the shape of original input tensor, instead of the whole
                                +-- tensor.
                                +fractionalAvgPoolGrad :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64,
                                +                                                             Double,
                                +                                                             Float] t) => 
                                +                         Tensor v'1 Data.Int.Int64 -- ^ __orig_input_tensor_shape__: Original input tensor shape for `fractional_avg_pool`
                                +                         -> Tensor v'2 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients
                                +                                         -- w.r.t. the output of `fractional_avg_pool`.
                                +                         -> Tensor v'3 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with
                                +                                                      -- col_pooling_sequence.
                                +                         -> Tensor v'4 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with
                                +                                                      -- row_pooling sequence.
                                +                         -> Tensor Build t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
                                +fractionalAvgPoolGrad = fractionalAvgPoolGrad' id
                                +fractionalAvgPoolGrad' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64,
                                +                                                              Double,
                                +                                                              Float] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 Data.Int.Int64 -- ^ __orig_input_tensor_shape__: Original input tensor shape for `fractional_avg_pool`
                                +                          -> Tensor v'2 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients
                                +                                          -- w.r.t. the output of `fractional_avg_pool`.
                                +                          -> Tensor v'3 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with
                                +                                                       -- col_pooling_sequence.
                                +                          -> Tensor v'4 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with
                                +                                                       -- row_pooling sequence.
                                +                          -> Tensor Build t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
                                +fractionalAvgPoolGrad' op'options orig_input_tensor_shape out_backprop
                                +                       row_pooling_sequence
                                +                       col_pooling_sequence | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_tensor_shape,
                                +                                                             buildInputs out_backprop,
                                +                                                             buildInputs row_pooling_sequence,
                                +                                                             buildInputs col_pooling_sequence]
                                +        return (opDef "FractionalAvgPoolGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "orig_input_tensor_shape"
                                +  description: "Original input tensor shape for `fractional_avg_pool`"
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients\nw.r.t. the output of `fractional_avg_pool`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "row_pooling_sequence"
                                +  description: "row pooling sequence, form pooling region with\ncol_pooling_sequence."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "col_pooling_sequence"
                                +  description: "column pooling sequence, form pooling region with\nrow_pooling sequence."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D.  Gradients w.r.t. the input of `fractional_avg_pool`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "overlapping"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Performs fractional max pooling on the input.
                                +--
                                +-- Fractional max pooling is slightly different than regular max pooling.  In
                                +-- regular max pooling, you downsize an input set by taking the maximum value of
                                +-- smaller N x N subsections of the set (often 2x2), and try to reduce the set by
                                +-- a factor of N, where N is an integer.  Fractional max pooling, as you might
                                +-- expect from the word "fractional", means that the overall reduction ratio N
                                +-- does not have to be an integer.
                                +-- 
                                +-- The sizes of the pooling regions are generated randomly but are fairly uniform.
                                +-- For example, let's look at the height dimension, and the constraints on the
                                +-- list of rows that will be pool boundaries.
                                +-- 
                                +-- First we define the following:
                                +-- 
                                +-- 1.  input_row_length : the number of rows from the input set
                                +-- 2.  output_row_length : which will be smaller than the input
                                +-- 3.  alpha = input_row_length / output_row_length : our reduction ratio
                                +-- 4.  K = floor(alpha)
                                +-- 5.  row_pooling_sequence : this is the result list of pool boundary rows
                                +-- 
                                +-- Then, row_pooling_sequence should satisfy:
                                +-- 
                                +-- 1.  a[0] = 0 : the first value of the sequence is 0
                                +-- 2.  a[end] = input_row_length : the last value of the sequence is the size
                                +-- 3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
                                +-- 4.  length(row_pooling_sequence) = output_row_length+1
                                +-- 
                                +-- For more details on fractional max pooling, see this paper:
                                +-- [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
                                +fractionalMaxPool :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                             Double, Float] t) => 
                                +                     Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
                                +                     -> (Tensor Build t, Tensor Build Data.Int.Int64,
                                +                         Tensor Build Data.Int.Int64)
                                +                     -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)
                                +                     --
                                +                     -- * __output__: output tensor after fractional max pooling.
                                +                     --
                                +                     -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.
                                +                     --
                                +                     -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.
                                +fractionalMaxPool = fractionalMaxPool' id
                                +fractionalMaxPool' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                              Double, Float] t) => OpParams ->
                                +                      Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
                                +                      -> (Tensor Build t, Tensor Build Data.Int.Int64,
                                +                          Tensor Build Data.Int.Int64)
                                +                      -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)
                                +                      --
                                +                      -- * __output__: output tensor after fractional max pooling.
                                +                      --
                                +                      -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.
                                +                      --
                                +                      -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.
                                +fractionalMaxPool' op'options value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value]
                                +        return (opDef "FractionalMaxPool"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "output tensor after fractional max pooling."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "row_pooling_sequence"
                                +  description: "row pooling sequence, needed to calculate gradient."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "col_pooling_sequence"
                                +  description: "column pooling sequence, needed to calculate gradient."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "pooling_ratio"
                                +  type: "list(float)"
                                +  description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "pseudo_random"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random."
                                +}
                                +attr {
                                +  name: "overlapping"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling."
                                +}
                                +attr {
                                +  name: "deterministic"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalMaxPool node in the computation graph. Mainly used\nin unit test to make FractionalMaxPool deterministic."
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes gradient of the FractionalMaxPool function.
                                +
                                +fractionalMaxPoolGrad :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Int.Int32,
                                +                                                                 Data.Int.Int64,
                                +                                                                 Double,
                                +                                                                 Float] t) => 
                                +                         Tensor v'1 t -- ^ __orig_input__: Original input for `fractional_max_pool`
                                +                         -> Tensor v'2 t -- ^ __orig_output__: Original output for `fractional_max_pool`
                                +                         -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients
                                +                                         -- w.r.t. the output of `fractional_max_pool`.
                                +                         -> Tensor v'4 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with
                                +                                                      -- col_pooling_sequence.
                                +                         -> Tensor v'5 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with
                                +                                                      -- row_pooling sequence.
                                +                         -> Tensor Build t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
                                +fractionalMaxPoolGrad = fractionalMaxPoolGrad' id
                                +fractionalMaxPoolGrad' :: forall v'1 v'2 v'3 v'4 v'5
                                +                          t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double,
                                +                                       Float] t) => OpParams ->
                                +                          Tensor v'1 t -- ^ __orig_input__: Original input for `fractional_max_pool`
                                +                          -> Tensor v'2 t -- ^ __orig_output__: Original output for `fractional_max_pool`
                                +                          -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients
                                +                                          -- w.r.t. the output of `fractional_max_pool`.
                                +                          -> Tensor v'4 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with
                                +                                                       -- col_pooling_sequence.
                                +                          -> Tensor v'5 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with
                                +                                                       -- row_pooling sequence.
                                +                          -> Tensor Build t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
                                +fractionalMaxPoolGrad' op'options orig_input orig_output out_backprop
                                +                       row_pooling_sequence
                                +                       col_pooling_sequence | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input,
                                +                                                             buildInputs orig_output,
                                +                                                             buildInputs out_backprop,
                                +                                                             buildInputs row_pooling_sequence,
                                +                                                             buildInputs col_pooling_sequence]
                                +        return (opDef "FractionalMaxPoolGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "orig_input"
                                +  description: "Original input for `fractional_max_pool`"
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "orig_output"
                                +  description: "Original output for `fractional_max_pool`"
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "out_backprop"
                                +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients\nw.r.t. the output of `fractional_max_pool`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "row_pooling_sequence"
                                +  description: "row pooling sequence, form pooling region with\ncol_pooling_sequence."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "col_pooling_sequence"
                                +  description: "column pooling sequence, form pooling region with\nrow_pooling sequence."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D.  Gradients w.r.t. the input of `fractional_max_pool`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "overlapping"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Batch normalization.
                                +--
                                +-- Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
                                +-- The size of 1D Tensors matches the dimension C of the 4D Tensors.
                                +fusedBatchNorm :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Float] t) => 
                                +                  Tensor v'1 t -- ^ __x__: A 4D Tensor for input data.
                                +                  -> Tensor v'2 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x.
                                +                  -> Tensor v'3 t -- ^ __offset__: A 1D Tensor for offset, to shift to the normalized x.
                                +                  -> Tensor v'4 t -- ^ __mean__: A 1D Tensor for population mean. Used for inference only;
                                +                                  -- must be empty for training.
                                +                  -> Tensor v'5 t -- ^ __variance__: A 1D Tensor for population variance. Used for inference only;
                                +                                  -- must be empty for training.
                                +                  -> (Tensor Build t, Tensor Build t, Tensor Build t,
                                +                      Tensor Build t, Tensor Build t)
                                +                  -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__)
                                +                  --
                                +                  -- * __y__: A 4D Tensor for output data.
                                +                  --
                                +                  -- * __batch_mean__: A 1D Tensor for the computed batch mean, to be used by TensorFlow
                                +                  -- to compute the running mean.
                                +                  --
                                +                  -- * __batch_variance__: A 1D Tensor for the computed batch variance, to be used by
                                +                  -- TensorFlow to compute the running variance.
                                +                  --
                                +                  -- * __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused
                                +                  -- in the gradient computation.
                                +                  --
                                +                  -- * __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance
                                +                  -- in the cuDNN case), to be used in the gradient computation.
                                +fusedBatchNorm = fusedBatchNorm' id
                                +fusedBatchNorm' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Float] t) =>
                                +                   OpParams ->
                                +                   Tensor v'1 t -- ^ __x__: A 4D Tensor for input data.
                                +                   -> Tensor v'2 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x.
                                +                   -> Tensor v'3 t -- ^ __offset__: A 1D Tensor for offset, to shift to the normalized x.
                                +                   -> Tensor v'4 t -- ^ __mean__: A 1D Tensor for population mean. Used for inference only;
                                +                                   -- must be empty for training.
                                +                   -> Tensor v'5 t -- ^ __variance__: A 1D Tensor for population variance. Used for inference only;
                                +                                   -- must be empty for training.
                                +                   -> (Tensor Build t, Tensor Build t, Tensor Build t,
                                +                       Tensor Build t, Tensor Build t)
                                +                   -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__)
                                +                   --
                                +                   -- * __y__: A 4D Tensor for output data.
                                +                   --
                                +                   -- * __batch_mean__: A 1D Tensor for the computed batch mean, to be used by TensorFlow
                                +                   -- to compute the running mean.
                                +                   --
                                +                   -- * __batch_variance__: A 1D Tensor for the computed batch variance, to be used by
                                +                   -- TensorFlow to compute the running variance.
                                +                   --
                                +                   -- * __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused
                                +                   -- in the gradient computation.
                                +                   --
                                +                   -- * __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance
                                +                   -- in the cuDNN case), to be used in the gradient computation.
                                +fusedBatchNorm' op'options x scale offset mean variance | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs scale,
                                +                                                             buildInputs offset,
                                +                                                             buildInputs mean,
                                +                                                             buildInputs variance]
                                +        return (opDef "FusedBatchNorm"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "x" description: "A 4D Tensor for input data." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "scale"
                                +  description: "A 1D Tensor for scaling factor, to scale the normalized x."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "offset"
                                +  description: "A 1D Tensor for offset, to shift to the normalized x."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "mean"
                                +  description: "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "variance"
                                +  description: "A 1D Tensor for population variance. Used for inference only;\nmust be empty for training."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "y"
                                +  description: "A 4D Tensor for output data."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "batch_mean"
                                +  description: "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "batch_variance"
                                +  description: "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "reserve_space_1"
                                +  description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "reserve_space_2"
                                +  description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be used in the gradient computation."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  description: "The data type for the elements of input and output Tensors."
                                +  allowed_values { list { type: DT_FLOAT } }
                                +}
                                +attr {
                                +  name: "epsilon"
                                +  type: "float"
                                +  default_value { f: 1.0e-4 }
                                +  description: "A small float number added to the variance of x."
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\"."
                                +}
                                +attr {
                                +  name: "is_training"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "A bool value to indicate the operation is for training (default)\nor inference."
                                +}
                                +-}
                                +
                                +-- | Gradient for batch normalization.
                                +--
                                +-- Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
                                +-- The size of 1D Tensors matches the dimension C of the 4D Tensors.
                                +fusedBatchNormGrad :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Float] t) => 
                                +                      Tensor v'1 t -- ^ __y_backprop__: A 4D Tensor for the gradient with respect to y.
                                +                      -> Tensor v'2 t -- ^ __x__: A 4D Tensor for input data.
                                +                      -> Tensor v'3 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x.
                                +                      -> Tensor v'4 t -- ^ __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused
                                +                                      -- in the gradient computation.
                                +                      -> Tensor v'5 t -- ^ __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance
                                +                                      -- in the cuDNN case), to be used in the gradient computation.
                                +                      -> (Tensor Build t, Tensor Build t, Tensor Build t,
                                +                          Tensor Build t, Tensor Build t)
                                +                      -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__)
                                +                      --
                                +                      -- * __x_backprop__: A 4D Tensor for the gradient with respect to x.
                                +                      --
                                +                      -- * __scale_backprop__: A 1D Tensor for the gradient with respect to scale.
                                +                      --
                                +                      -- * __offset_backprop__: A 1D Tensor for the gradient with respect to offset.
                                +                      --
                                +                      -- * __reserve_space_3__: Unused placeholder to match the mean input in FusedBatchNorm.
                                +                      --
                                +                      -- * __reserve_space_4__: Unused placeholder to match the variance input
                                +                      -- in FusedBatchNorm.
                                +fusedBatchNormGrad = fusedBatchNormGrad' id
                                +fusedBatchNormGrad' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Float] t) =>
                                +                       OpParams ->
                                +                       Tensor v'1 t -- ^ __y_backprop__: A 4D Tensor for the gradient with respect to y.
                                +                       -> Tensor v'2 t -- ^ __x__: A 4D Tensor for input data.
                                +                       -> Tensor v'3 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x.
                                +                       -> Tensor v'4 t -- ^ __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused
                                +                                       -- in the gradient computation.
                                +                       -> Tensor v'5 t -- ^ __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance
                                +                                       -- in the cuDNN case), to be used in the gradient computation.
                                +                       -> (Tensor Build t, Tensor Build t, Tensor Build t,
                                +                           Tensor Build t, Tensor Build t)
                                +                       -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__)
                                +                       --
                                +                       -- * __x_backprop__: A 4D Tensor for the gradient with respect to x.
                                +                       --
                                +                       -- * __scale_backprop__: A 1D Tensor for the gradient with respect to scale.
                                +                       --
                                +                       -- * __offset_backprop__: A 1D Tensor for the gradient with respect to offset.
                                +                       --
                                +                       -- * __reserve_space_3__: Unused placeholder to match the mean input in FusedBatchNorm.
                                +                       --
                                +                       -- * __reserve_space_4__: Unused placeholder to match the variance input
                                +                       -- in FusedBatchNorm.
                                +fusedBatchNormGrad' op'options y_backprop x scale reserve_space_1
                                +                    reserve_space_2 | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y_backprop,
                                +                                                             buildInputs x,
                                +                                                             buildInputs scale,
                                +                                                             buildInputs reserve_space_1,
                                +                                                             buildInputs reserve_space_2]
                                +        return (opDef "FusedBatchNormGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "y_backprop"
                                +  description: "A 4D Tensor for the gradient with respect to y."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "x" description: "A 4D Tensor for input data." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "scale"
                                +  description: "A 1D Tensor for scaling factor, to scale the normalized x."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "reserve_space_1"
                                +  description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "reserve_space_2"
                                +  description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be used in the gradient computation."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "x_backprop"
                                +  description: "A 4D Tensor for the gradient with respect to x."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "scale_backprop"
                                +  description: "A 1D Tensor for the gradient with respect to scale."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "offset_backprop"
                                +  description: "A 1D Tensor for the gradient with respect to offset."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "reserve_space_3"
                                +  description: "Unused placeholder to match the mean input in FusedBatchNorm."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "reserve_space_4"
                                +  description: "Unused placeholder to match the variance input\nin FusedBatchNorm."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  description: "The data type for the elements of input and output Tensors."
                                +  allowed_values { list { type: DT_FLOAT } }
                                +}
                                +attr {
                                +  name: "epsilon"
                                +  type: "float"
                                +  default_value { f: 1.0e-4 }
                                +  description: "A small float number added to the variance of x."
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\"."
                                +}
                                +attr {
                                +  name: "is_training"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "A bool value to indicate the operation is for training (default)\nor inference."
                                +}
                                +-}
                                +
                                +-- | Performs a padding as a preprocess during a convolution.
                                +--
                                +-- Similar to FusedResizeAndPadConv2d, this op allows for an optimized
                                +-- implementation where the spatial padding transformation stage is fused with the
                                +-- im2col lookup, but in this case without the bilinear filtering required for
                                +-- resizing. Fusing the padding prevents the need to write out the intermediate
                                +-- results as whole tensors, reducing memory pressure, and we can get some latency
                                +-- gains by merging the transformation calculations.
                                +-- The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
                                +-- order is used instead.
                                +-- Internally this op uses a single per-graph scratch buffer, which means that it
                                +-- will block if multiple versions are being run in parallel. This is because this
                                +-- operator is primarily an optimization to minimize memory usage.
                                +fusedPadConv2D :: forall v'1 v'2 v'3 t . (OneOf '[Float] t) => 
                                +                  Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
                                +                  -> Tensor v'2 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
                                +                                               -- rows must be the same as the rank of `input`.
                                +                  -> Tensor v'3 t -- ^ __filter__: 4-D with shape
                                +                                  -- `[filter_height, filter_width, in_channels, out_channels]`.
                                +                  -> Tensor Build t -- ^ __output__
                                +fusedPadConv2D = fusedPadConv2D' id
                                +fusedPadConv2D' :: forall v'1 v'2 v'3 t . (OneOf '[Float] t) => OpParams ->
                                +                   Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
                                +                                                -- rows must be the same as the rank of `input`.
                                +                   -> Tensor v'3 t -- ^ __filter__: 4-D with shape
                                +                                   -- `[filter_height, filter_width, in_channels, out_channels]`.
                                +                   -> Tensor Build t -- ^ __output__
                                +fusedPadConv2D' op'options input paddings filter | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs paddings,
                                +                                                             buildInputs filter]
                                +        return (opDef "FusedPadConv2D"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "paddings"
                                +  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T" type: "type" allowed_values { list { type: DT_FLOAT } }
                                +}
                                +attr {
                                +  name: "mode"
                                +  type: "string"
                                +  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Performs a resize and padding as a preprocess during a convolution.
                                +--
                                +-- It's often possible to do spatial transformations more efficiently as part of
                                +-- the packing stage of a convolution, so this op allows for an optimized
                                +-- implementation where these stages are fused together. This prevents the need to
                                +-- write out the intermediate results as whole tensors, reducing memory pressure,
                                +-- and we can get some latency gains by merging the transformation calculations.
                                +-- The data_format attribute for Conv2D isn't supported by this op, and defaults to
                                +-- 'NHWC' order.
                                +-- Internally this op uses a single per-graph scratch buffer, which means that it
                                +-- will block if multiple versions are being run in parallel. This is because this
                                +-- operator is primarily an optimization to minimize memory usage.
                                +fusedResizeAndPadConv2D :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Float] t) => 
                                +                           Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
                                +                           -> Tensor v'2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                                        -- new size for the images.
                                +                           -> Tensor v'3 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
                                +                                                        -- rows must be the same as the rank of `input`.
                                +                           -> Tensor v'4 t -- ^ __filter__: 4-D with shape
                                +                                           -- `[filter_height, filter_width, in_channels, out_channels]`.
                                +                           -> Tensor Build t -- ^ __output__
                                +fusedResizeAndPadConv2D = fusedResizeAndPadConv2D' id
                                +fusedResizeAndPadConv2D' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Float] t) =>
                                +                            OpParams ->
                                +                            Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
                                +                            -> Tensor v'2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                                         -- new size for the images.
                                +                            -> Tensor v'3 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
                                +                                                         -- rows must be the same as the rank of `input`.
                                +                            -> Tensor v'4 t -- ^ __filter__: 4-D with shape
                                +                                            -- `[filter_height, filter_width, in_channels, out_channels]`.
                                +                            -> Tensor Build t -- ^ __output__
                                +fusedResizeAndPadConv2D' op'options input size paddings
                                +                         filter | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs size,
                                +                                                             buildInputs paddings,
                                +                                                             buildInputs filter]
                                +        return (opDef "FusedResizeAndPadConv2D"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "paddings"
                                +  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "filter"
                                +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T" type: "type" allowed_values { list { type: DT_FLOAT } }
                                +}
                                +attr {
                                +  name: "resize_align_corners"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, rescale input by (new_height - 1) / (height - 1),\nwhich exactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
                                +}
                                +attr {
                                +  name: "mode"
                                +  type: "string"
                                +  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Gather slices from `params` according to `indices`.
                                +--
                                +-- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
                                +-- Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
                                +-- 
                                +-- ```python
                                +--     # Scalar indices
                                +--     output[:, ..., :] = params[indices, :, ... :]
                                +-- 
                                +--     # Vector indices
                                +--     output[i, :, ..., :] = params[indices[i], :, ... :]
                                +-- 
                                +--     # Higher rank indices
                                +--     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
                                +-- ```
                                +-- 
                                +-- If `indices` is a permutation and `len(indices) == params.shape[0]` then
                                +-- this operation will permute `params` accordingly.
                                +-- 
                                +-- `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
                                +-- `indices` are always validated to be within range. If assigned to GPU,
                                +-- out-of-bound indices result in safe but unspecified behavior, which may include
                                +-- raising an error.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
                                +-- </div>
                                +gather :: forall v'1 v'2 tparams tindices . (TensorType tparams,
                                +                                             OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] tindices) =>
                                +          
                                +          Tensor v'1 tparams -- ^ __params__
                                +          -> Tensor v'2 tindices -- ^ __indices__
                                +          -> Tensor Build tparams -- ^ __output__
                                +gather = gather' id
                                +gather' :: forall v'1 v'2 tparams tindices . (TensorType tparams,
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +           OpParams ->
                                +           Tensor v'1 tparams -- ^ __params__
                                +           -> Tensor v'2 tindices -- ^ __indices__
                                +           -> Tensor Build tparams -- ^ __output__
                                +gather' op'options params indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params,
                                +                                                             buildInputs indices]
                                +        return (opDef "Gather"
                                +                & opAttr "Tparams" .~ tensorType (undefined :: tparams)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "params" type_attr: "Tparams" }
                                +input_arg { name: "indices" type_attr: "Tindices" }
                                +output_arg { name: "output" type_attr: "Tparams" }
                                +attr {
                                +  name: "validate_indices" type: "bool" default_value { b: true }
                                +}
                                +attr { name: "Tparams" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Gather slices from `params` into a Tensor with shape specified by `indices`.
                                +--
                                +-- `indices` is an K-dimensional integer tensor, best thought of as a
                                +-- (K-1)-dimensional tensor of indices into `params`, where each element defines a
                                +-- slice of `params`:
                                +-- 
                                +--     output[i_0, ..., i_{K-2}] = params[indices[i0, ..., i_{K-2}]]
                                +-- 
                                +-- Whereas in @{tf.gather} `indices` defines slices into the first
                                +-- dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
                                +-- first `N` dimensions of `params`, where `N = indices.shape[-1]`.
                                +-- 
                                +-- The last dimension of `indices` can be at most the rank of
                                +-- `params`:
                                +-- 
                                +--     indices.shape[-1] <= params.rank
                                +-- 
                                +-- The last dimension of `indices` corresponds to elements
                                +-- (if `indices.shape[-1] == params.rank`) or slices
                                +-- (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
                                +-- of `params`.  The output tensor has shape
                                +-- 
                                +--     indices.shape[:-1] + params.shape[indices.shape[-1]:]
                                +-- 
                                +-- Some examples below.
                                +-- 
                                +-- Simple indexing into a matrix:
                                +-- 
                                +-- ```python
                                +--     indices = [[0, 0], [1, 1]]
                                +--     params = [['a', 'b'], ['c', 'd']]
                                +--     output = ['a', 'd']
                                +-- ```
                                +-- 
                                +-- Slice indexing into a matrix:
                                +-- 
                                +-- ```python
                                +--     indices = [[1], [0]]
                                +--     params = [['a', 'b'], ['c', 'd']]
                                +--     output = [['c', 'd'], ['a', 'b']]
                                +-- ```
                                +-- 
                                +-- Indexing into a 3-tensor:
                                +-- 
                                +-- ```python
                                +--     indices = [[1]]
                                +--     params = [[['a0', 'b0'], ['c0', 'd0']],
                                +--               [['a1', 'b1'], ['c1', 'd1']]]
                                +--     output = [[['a1', 'b1'], ['c1', 'd1']]]
                                +-- 
                                +-- 
                                +--     indices = [[0, 1], [1, 0]]
                                +--     params = [[['a0', 'b0'], ['c0', 'd0']],
                                +--               [['a1', 'b1'], ['c1', 'd1']]]
                                +--     output = [['c0', 'd0'], ['a1', 'b1']]
                                +-- 
                                +-- 
                                +--     indices = [[0, 0, 1], [1, 0, 1]]
                                +--     params = [[['a0', 'b0'], ['c0', 'd0']],
                                +--               [['a1', 'b1'], ['c1', 'd1']]]
                                +--     output = ['b0', 'b1']
                                +-- ```
                                +-- 
                                +-- Batched indexing into a matrix:
                                +-- 
                                +-- ```python
                                +--     indices = [[[0, 0]], [[0, 1]]]
                                +--     params = [['a', 'b'], ['c', 'd']]
                                +--     output = [['a'], ['b']]
                                +-- ```
                                +-- 
                                +-- Batched slice indexing into a matrix:
                                +-- 
                                +-- ```python
                                +--     indices = [[[1]], [[0]]]
                                +--     params = [['a', 'b'], ['c', 'd']]
                                +--     output = [[['c', 'd']], [['a', 'b']]]
                                +-- ```
                                +-- 
                                +-- Batched indexing into a 3-tensor:
                                +-- 
                                +-- ```python
                                +--     indices = [[[1]], [[0]]]
                                +--     params = [[['a0', 'b0'], ['c0', 'd0']],
                                +--               [['a1', 'b1'], ['c1', 'd1']]]
                                +--     output = [[[['a1', 'b1'], ['c1', 'd1']]],
                                +--               [[['a0', 'b0'], ['c0', 'd0']]]]
                                +-- 
                                +--     indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
                                +--     params = [[['a0', 'b0'], ['c0', 'd0']],
                                +--               [['a1', 'b1'], ['c1', 'd1']]]
                                +--     output = [[['c0', 'd0'], ['a1', 'b1']],
                                +--               [['a0', 'b0'], ['c1', 'd1']]]
                                +-- 
                                +-- 
                                +--     indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
                                +--     params = [[['a0', 'b0'], ['c0', 'd0']],
                                +--               [['a1', 'b1'], ['c1', 'd1']]]
                                +--     output = [['b0', 'b1'], ['d0', 'c1']]
                                +-- ```
                                +gatherNd :: forall v'1 v'2 tparams tindices . (TensorType tparams,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +            
                                +            Tensor v'1 tparams -- ^ __params__: The tensor from which to gather values.
                                +            -> Tensor v'2 tindices -- ^ __indices__: Index tensor.
                                +            -> Tensor Build tparams -- ^ __output__: Values from `params` gathered from indices given by `indices`, with
                                +            -- shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
                                +gatherNd = gatherNd' id
                                +gatherNd' :: forall v'1 v'2 tparams tindices . (TensorType tparams,
                                +                                                OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] tindices) =>
                                +             OpParams ->
                                +             Tensor v'1 tparams -- ^ __params__: The tensor from which to gather values.
                                +             -> Tensor v'2 tindices -- ^ __indices__: Index tensor.
                                +             -> Tensor Build tparams -- ^ __output__: Values from `params` gathered from indices given by `indices`, with
                                +             -- shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
                                +gatherNd' op'options params indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params,
                                +                                                             buildInputs indices]
                                +        return (opDef "GatherNd"
                                +                & opAttr "Tparams" .~ tensorType (undefined :: tparams)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "params"
                                +  description: "The tensor from which to gather values."
                                +  type_attr: "Tparams"
                                +}
                                +input_arg {
                                +  name: "indices" description: "Index tensor." type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Values from `params` gathered from indices given by `indices`, with\nshape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`."
                                +  type_attr: "Tparams"
                                +}
                                +attr { name: "Tparams" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Gather slices from `params` axis `axis` according to `indices`.
                                +--
                                +-- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
                                +-- Produces an output tensor with shape `params.shape[:axis] + indices.shape +
                                +-- params.shape[axis + 1:]` where:
                                +-- 
                                +-- ```python
                                +--     # Scalar indices (output is rank(params) - 1).
                                +--     output[a_0, ..., a_n, b_0, ..., b_n] =
                                +--       params[a_0, ..., a_n, indices, b_0, ..., b_n]
                                +-- 
                                +--     # Vector indices (output is rank(params)).
                                +--     output[a_0, ..., a_n, i, b_0, ..., b_n] =
                                +--       params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
                                +-- 
                                +--     # Higher rank indices (output is rank(params) + rank(indices) - 1).
                                +--     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
                                +--       params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
                                +-- ```
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
                                +-- </div>
                                +gatherV2 :: forall v'1 v'2 v'3 tparams tindices taxis . (TensorType tparams,
                                +                                                         OneOf '[Data.Int.Int32,
                                +                                                                 Data.Int.Int64] tindices,
                                +                                                         OneOf '[Data.Int.Int32,
                                +                                                                 Data.Int.Int64] taxis) =>
                                +            
                                +            Tensor v'1 tparams -- ^ __params__: The tensor from which to gather values. Must be at least rank
                                +                               -- `axis + 1`.
                                +            -> Tensor v'2 tindices -- ^ __indices__: Index tensor. Must be in range `[0, params.shape[axis])`.
                                +            -> Tensor v'3 taxis -- ^ __axis__: The axis in `params` to gather `indices` from. Defaults to the first
                                +                                -- dimension. Supports negative indexes.
                                +            -> Tensor Build tparams -- ^ __output__: Values from `params` gathered from indices given by `indices`, with
                                +            -- shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
                                +gatherV2 = gatherV2' id
                                +gatherV2' :: forall v'1 v'2 v'3 tparams tindices taxis . (TensorType tparams,
                                +                                                          OneOf '[Data.Int.Int32,
                                +                                                                  Data.Int.Int64] tindices,
                                +                                                          OneOf '[Data.Int.Int32,
                                +                                                                  Data.Int.Int64] taxis) =>
                                +             OpParams ->
                                +             Tensor v'1 tparams -- ^ __params__: The tensor from which to gather values. Must be at least rank
                                +                                -- `axis + 1`.
                                +             -> Tensor v'2 tindices -- ^ __indices__: Index tensor. Must be in range `[0, params.shape[axis])`.
                                +             -> Tensor v'3 taxis -- ^ __axis__: The axis in `params` to gather `indices` from. Defaults to the first
                                +                                 -- dimension. Supports negative indexes.
                                +             -> Tensor Build tparams -- ^ __output__: Values from `params` gathered from indices given by `indices`, with
                                +             -- shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
                                +gatherV2' op'options params indices axis | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs axis]
                                +        return (opDef "GatherV2"
                                +                & opAttr "Tparams" .~ tensorType (undefined :: tparams)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & opAttr "Taxis" .~ tensorType (undefined :: taxis)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "params"
                                +  description: "The tensor from which to gather values. Must be at least rank\n`axis + 1`."
                                +  type_attr: "Tparams"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "Index tensor. Must be in range `[0, params.shape[axis])`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "axis"
                                +  description: "The axis in `params` to gather `indices` from. Defaults to the first\ndimension. Supports negative indexes."
                                +  type_attr: "Taxis"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Values from `params` gathered from indices given by `indices`, with\nshape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`."
                                +  type_attr: "Tparams"
                                +}
                                +attr { name: "Tparams" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "Taxis"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Store the input tensor in the state of the current session.
                                +
                                +getSessionHandle :: forall v'1 t . (TensorType t) => 
                                +                    Tensor v'1 t -- ^ __value__: The tensor to be stored.
                                +                    -> Tensor Build Data.ByteString.ByteString -- ^ __handle__: The handle for the tensor stored in the session state, represented
                                +                    -- as a string.
                                +getSessionHandle = getSessionHandle' id
                                +getSessionHandle' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                     Tensor v'1 t -- ^ __value__: The tensor to be stored.
                                +                     -> Tensor Build Data.ByteString.ByteString -- ^ __handle__: The handle for the tensor stored in the session state, represented
                                +                     -- as a string.
                                +getSessionHandle' op'options value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value]
                                +        return (opDef "GetSessionHandle"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "The tensor to be stored."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle for the tensor stored in the session state, represented\nas a string."
                                +  type: DT_STRING
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Store the input tensor in the state of the current session.
                                +
                                +getSessionHandleV2 :: forall v'1 t m' . (MonadBuild m', TensorType t) => 
                                +                      Tensor v'1 t -- ^ __value__: The tensor to be stored.
                                +                      -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle for the tensor stored in the session state, represented
                                +                      -- as a ResourceHandle object.
                                +getSessionHandleV2 = getSessionHandleV2' id
                                +getSessionHandleV2' :: forall v'1 t m' . (MonadBuild m', TensorType t) =>
                                +                       OpParams ->
                                +                       Tensor v'1 t -- ^ __value__: The tensor to be stored.
                                +                       -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle for the tensor stored in the session state, represented
                                +                       -- as a ResourceHandle object.
                                +getSessionHandleV2' op'options value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value]
                                +        buildOp [] (opDef "GetSessionHandleV2"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "The tensor to be stored."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle for the tensor stored in the session state, represented\nas a ResourceHandle object."
                                +  type: DT_RESOURCE
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Get the value of the tensor specified by its handle.
                                +
                                +getSessionTensor :: forall v'1 dtype . (TensorType dtype) => 
                                +                    Tensor v'1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state.
                                +                    -> Tensor Build dtype -- ^ __value__: The tensor for the given handle.
                                +getSessionTensor = getSessionTensor' id
                                +getSessionTensor' :: forall v'1 dtype . (TensorType dtype) => OpParams ->
                                +                     Tensor v'1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state.
                                +                     -> Tensor Build dtype -- ^ __value__: The tensor for the given handle.
                                +getSessionTensor' op'options handle | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        return (opDef "GetSessionTensor"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle for a tensor stored in the session state."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "value"
                                +  description: "The tensor for the given handle."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the output value."
                                +}
                                +-}
                                +
                                +-- | Returns the truth value of (x > y) element-wise.
                                +--
                                +-- *NOTE*: `Greater` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +greater :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor v'2 t -- ^ __y__
                                +           -> Tensor Build Bool -- ^ __z__
                                +greater = greater' id
                                +greater' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => OpParams ->
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build Bool -- ^ __z__
                                +greater' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Greater"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the truth value of (x >= y) element-wise.
                                +--
                                +-- *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +greaterEqual :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t) => 
                                +                Tensor v'1 t -- ^ __x__
                                +                -> Tensor v'2 t -- ^ __y__
                                +                -> Tensor Build Bool -- ^ __z__
                                +greaterEqual = greaterEqual' id
                                +greaterEqual' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t) => OpParams ->
                                +                 Tensor v'1 t -- ^ __x__
                                +                 -> Tensor v'2 t -- ^ __y__
                                +                 -> Tensor Build Bool -- ^ __z__
                                +greaterEqual' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "GreaterEqual"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Convert one or more images from HSV to RGB.
                                +--
                                +-- Outputs a tensor of the same shape as the `images` tensor, containing the RGB
                                +-- value of the pixels. The output is only well defined if the value in `images`
                                +-- are in `[0,1]`.
                                +-- 
                                +-- See `rgb_to_hsv` for a description of the HSV encoding.
                                +hSVToRGB :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __images__: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
                                +            -> Tensor Build t -- ^ __output__: `images` converted to RGB.
                                +hSVToRGB = hSVToRGB' id
                                +hSVToRGB' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __images__: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
                                +             -> Tensor Build t -- ^ __output__: `images` converted to RGB.
                                +hSVToRGB' op'options images | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images]
                                +        return (opDef "HSVToRGB"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "1-D or higher rank. HSV data to convert. Last dimension must be size 3."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "`images` converted to RGB."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Creates a non-initialized hash table.
                                +--
                                +-- This op creates a hash table, specifying the type of its keys and values.
                                +-- Before using the table you will have to initialize it.  After initialization the
                                +-- table will be immutable.
                                +hashTable :: forall m' . (MonadBuild m') => 
                                +             DataType -- ^ __key_dtype__: Type of the table keys.
                                +             -> DataType -- ^ __value_dtype__: Type of the table values.
                                +             -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table.
                                +hashTable = hashTable' id
                                +hashTable' :: forall m' . (MonadBuild m') => OpParams ->
                                +              DataType -- ^ __key_dtype__: Type of the table keys.
                                +              -> DataType -- ^ __value_dtype__: Type of the table values.
                                +              -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table.
                                +hashTable' op'options key_dtype value_dtype | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "HashTable"
                                +                    & opAttr "key_dtype" .~ key_dtype
                                +                    & opAttr "value_dtype" .~ value_dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is shared under the given name across\nmultiple sessions."
                                +}
                                +attr {
                                +  name: "use_node_name_sharing"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true and shared_name is empty, the table is shared\nusing the node name."
                                +}
                                +attr {
                                +  name: "key_dtype"
                                +  type: "type"
                                +  description: "Type of the table keys."
                                +}
                                +attr {
                                +  name: "value_dtype"
                                +  type: "type"
                                +  description: "Type of the table values."
                                +}
                                +-}
                                +
                                +-- | Creates a non-initialized hash table.
                                +--
                                +-- This op creates a hash table, specifying the type of its keys and values.
                                +-- Before using the table you will have to initialize it.  After initialization the
                                +-- table will be immutable.
                                +hashTableV2 :: forall m' . (MonadBuild m') => 
                                +               DataType -- ^ __key_dtype__: Type of the table keys.
                                +               -> DataType -- ^ __value_dtype__: Type of the table values.
                                +               -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__: Handle to a table.
                                +hashTableV2 = hashTableV2' id
                                +hashTableV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                DataType -- ^ __key_dtype__: Type of the table keys.
                                +                -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__: Handle to a table.
                                +hashTableV2' op'options key_dtype value_dtype | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "HashTableV2"
                                +                    & opAttr "key_dtype" .~ key_dtype
                                +                    & opAttr "value_dtype" .~ value_dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is shared under the given name across\nmultiple sessions."
                                +}
                                +attr {
                                +  name: "use_node_name_sharing"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true and shared_name is empty, the table is shared\nusing the node name."
                                +}
                                +attr {
                                +  name: "key_dtype"
                                +  type: "type"
                                +  description: "Type of the table keys."
                                +}
                                +attr {
                                +  name: "value_dtype"
                                +  type: "type"
                                +  description: "Type of the table values."
                                +}
                                +-}
                                +
                                +-- | Outputs a `Summary` protocol buffer with a histogram.
                                +--
                                +-- The generated
                                +-- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
                                +-- has one summary value containing a histogram for `values`.
                                +-- 
                                +-- This op reports an `InvalidArgument` error if any value is not finite.
                                +histogramSummary :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                                Data.Int.Int64, Data.Int.Int8,
                                +                                                Data.Word.Word16,
                                +                                                Data.Word.Word8, Double,
                                +                                                Float] t) => 
                                +                    Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar.  Tag to use for the `Summary.Value`.
                                +                    -> Tensor v'2 t -- ^ __values__: Any shape. Values to use to build the histogram.
                                +                    -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +histogramSummary = histogramSummary' id
                                +histogramSummary' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                                 Data.Int.Int64, Data.Int.Int8,
                                +                                                 Data.Word.Word16,
                                +                                                 Data.Word.Word8, Double,
                                +                                                 Float] t) => OpParams ->
                                +                     Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar.  Tag to use for the `Summary.Value`.
                                +                     -> Tensor v'2 t -- ^ __values__: Any shape. Values to use to build the histogram.
                                +                     -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +histogramSummary' op'options tag values | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag,
                                +                                                             buildInputs values]
                                +        return (opDef "HistogramSummary"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tag"
                                +  description: "Scalar.  Tag to use for the `Summary.Value`."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "Any shape. Values to use to build the histogram."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "summary"
                                +  description: "Scalar. Serialized `Summary` protocol buffer."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Inverse fast Fourier transform.
                                +--
                                +-- Computes the inverse 1-dimensional discrete Fourier transform over the
                                +-- inner-most dimension of `input`.
                                +iFFT :: 
                                +        Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +        -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most
                                +        --   dimension of `input` is replaced with its inverse 1D Fourier transform.
                                +        -- 
                                +        -- @compatibility(numpy)
                                +        -- Equivalent to np.fft.ifft
                                +        -- @end_compatibility
                                +iFFT = iFFT' id
                                +iFFT' :: OpParams ->
                                +         Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +         -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most
                                +         --   dimension of `input` is replaced with its inverse 1D Fourier transform.
                                +         -- 
                                +         -- @compatibility(numpy)
                                +         -- Equivalent to np.fft.ifft
                                +         -- @end_compatibility
                                +iFFT' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "IFFT"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same shape as `input`. The inner-most\n  dimension of `input` is replaced with its inverse 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | Inverse 2D fast Fourier transform.
                                +--
                                +-- Computes the inverse 2-dimensional discrete Fourier transform over the
                                +-- inner-most 2 dimensions of `input`.
                                +iFFT2D :: 
                                +          Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +          -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2
                                +          --   dimensions of `input` are replaced with their inverse 2D Fourier transform.
                                +          -- 
                                +          -- @compatibility(numpy)
                                +          -- Equivalent to np.fft.ifft2
                                +          -- @end_compatibility
                                +iFFT2D = iFFT2D' id
                                +iFFT2D' :: OpParams ->
                                +           Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +           -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2
                                +           --   dimensions of `input` are replaced with their inverse 2D Fourier transform.
                                +           -- 
                                +           -- @compatibility(numpy)
                                +           -- Equivalent to np.fft.ifft2
                                +           -- @end_compatibility
                                +iFFT2D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "IFFT2D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n  dimensions of `input` are replaced with their inverse 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft2\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | Inverse 3D fast Fourier transform.
                                +--
                                +-- Computes the inverse 3-dimensional discrete Fourier transform over the
                                +-- inner-most 3 dimensions of `input`.
                                +iFFT3D :: 
                                +          Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +          -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3
                                +          --   dimensions of `input` are replaced with their inverse 3D Fourier transform.
                                +          -- 
                                +          -- @compatibility(numpy)
                                +          -- Equivalent to np.fft.ifftn with 3 dimensions.
                                +          -- @end_compatibility
                                +iFFT3D = iFFT3D' id
                                +iFFT3D' :: OpParams ->
                                +           Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +           -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3
                                +           --   dimensions of `input` are replaced with their inverse 3D Fourier transform.
                                +           -- 
                                +           -- @compatibility(numpy)
                                +           -- Equivalent to np.fft.ifftn with 3 dimensions.
                                +           -- @end_compatibility
                                +iFFT3D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "IFFT3D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n  dimensions of `input` are replaced with their inverse 3D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifftn with 3 dimensions.\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | Inverse real-valued fast Fourier transform.
                                +--
                                +-- Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
                                +-- signal over the inner-most dimension of `input`.
                                +-- 
                                +-- The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
                                +-- `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
                                +-- `fft_length` is not provided, it is computed from the size of the inner-most
                                +-- dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
                                +-- compute `input` is odd, it should be provided since it cannot be inferred
                                +-- properly.
                                +-- 
                                +-- Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
                                +-- than the corresponding dimension of `input`, the dimension is cropped. If it is
                                +-- larger, the dimension is padded with zeros.
                                +iRFFT :: 
                                +         Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +         -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [1]. The FFT length.
                                +         -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most
                                +         --   dimension of `input` is replaced with the `fft_length` samples of its inverse
                                +         --   1D Fourier transform.
                                +         -- 
                                +         -- @compatibility(numpy)
                                +         -- Equivalent to np.fft.irfft
                                +         -- @end_compatibility
                                +iRFFT = iRFFT' id
                                +iRFFT' :: OpParams ->
                                +          Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +          -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [1]. The FFT length.
                                +          -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most
                                +          --   dimension of `input` is replaced with the `fft_length` samples of its inverse
                                +          --   1D Fourier transform.
                                +          -- 
                                +          -- @compatibility(numpy)
                                +          -- Equivalent to np.fft.irfft
                                +          -- @end_compatibility
                                +iRFFT' op'options input fft_length | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs fft_length]
                                +        return (opDef "IRFFT"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +input_arg {
                                +  name: "fft_length"
                                +  description: "An int32 tensor of shape [1]. The FFT length."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A float32 tensor of the same rank as `input`. The inner-most\n  dimension of `input` is replaced with the `fft_length` samples of its inverse\n  1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft\n@end_compatibility"
                                +  type: DT_FLOAT
                                +}
                                +-}
                                +
                                +-- | Inverse 2D real-valued fast Fourier transform.
                                +--
                                +-- Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
                                +-- signal over the inner-most 2 dimensions of `input`.
                                +-- 
                                +-- The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
                                +-- The inner-most dimension contains the `fft_length / 2 + 1` unique components of
                                +-- the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
                                +-- from the size of the inner-most 2 dimensions of `input`. If the FFT length used
                                +-- to compute `input` is odd, it should be provided since it cannot be inferred
                                +-- properly.
                                +-- 
                                +-- Along each axis `IRFFT2D` is computed on, if `fft_length` (or
                                +-- `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
                                +-- corresponding dimension of `input`, the dimension is cropped. If it is larger,
                                +-- the dimension is padded with zeros.
                                +iRFFT2D :: 
                                +           Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +           -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [2]. The FFT length for each dimension.
                                +           -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most 2
                                +           --   dimensions of `input` are replaced with the `fft_length` samples of their
                                +           --   inverse 2D Fourier transform.
                                +           -- 
                                +           -- @compatibility(numpy)
                                +           -- Equivalent to np.fft.irfft2
                                +           -- @end_compatibility
                                +iRFFT2D = iRFFT2D' id
                                +iRFFT2D' :: OpParams ->
                                +            Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +            -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [2]. The FFT length for each dimension.
                                +            -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most 2
                                +            --   dimensions of `input` are replaced with the `fft_length` samples of their
                                +            --   inverse 2D Fourier transform.
                                +            -- 
                                +            -- @compatibility(numpy)
                                +            -- Equivalent to np.fft.irfft2
                                +            -- @end_compatibility
                                +iRFFT2D' op'options input fft_length | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs fft_length]
                                +        return (opDef "IRFFT2D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +input_arg {
                                +  name: "fft_length"
                                +  description: "An int32 tensor of shape [2]. The FFT length for each dimension."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A float32 tensor of the same rank as `input`. The inner-most 2\n  dimensions of `input` are replaced with the `fft_length` samples of their\n  inverse 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft2\n@end_compatibility"
                                +  type: DT_FLOAT
                                +}
                                +-}
                                +
                                +-- | Inverse 3D real-valued fast Fourier transform.
                                +--
                                +-- Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
                                +-- signal over the inner-most 3 dimensions of `input`.
                                +-- 
                                +-- The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
                                +-- The inner-most dimension contains the `fft_length / 2 + 1` unique components of
                                +-- the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
                                +-- from the size of the inner-most 3 dimensions of `input`. If the FFT length used
                                +-- to compute `input` is odd, it should be provided since it cannot be inferred
                                +-- properly.
                                +-- 
                                +-- Along each axis `IRFFT3D` is computed on, if `fft_length` (or
                                +-- `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
                                +-- corresponding dimension of `input`, the dimension is cropped. If it is larger,
                                +-- the dimension is padded with zeros.
                                +iRFFT3D :: 
                                +           Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +           -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [3]. The FFT length for each dimension.
                                +           -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most 3
                                +           --   dimensions of `input` are replaced with the `fft_length` samples of their
                                +           --   inverse 3D real Fourier transform.
                                +           -- 
                                +           -- @compatibility(numpy)
                                +           -- Equivalent to np.irfftn with 3 dimensions.
                                +           -- @end_compatibility
                                +iRFFT3D = iRFFT3D' id
                                +iRFFT3D' :: OpParams ->
                                +            Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
                                +            -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [3]. The FFT length for each dimension.
                                +            -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most 3
                                +            --   dimensions of `input` are replaced with the `fft_length` samples of their
                                +            --   inverse 3D real Fourier transform.
                                +            -- 
                                +            -- @compatibility(numpy)
                                +            -- Equivalent to np.irfftn with 3 dimensions.
                                +            -- @end_compatibility
                                +iRFFT3D' op'options input fft_length | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs fft_length]
                                +        return (opDef "IRFFT3D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A complex64 tensor." type: DT_COMPLEX64
                                +}
                                +input_arg {
                                +  name: "fft_length"
                                +  description: "An int32 tensor of shape [3]. The FFT length for each dimension."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A float32 tensor of the same rank as `input`. The inner-most 3\n  dimensions of `input` are replaced with the `fft_length` samples of their\n  inverse 3D real Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.irfftn with 3 dimensions.\n@end_compatibility"
                                +  type: DT_FLOAT
                                +}
                                +-}
                                +
                                +-- | Return a tensor with the same shape and contents as the input tensor or value.
                                +
                                +identity :: forall v'1 t . (TensorType t) => 
                                +            Tensor v'1 t -- ^ __input__
                                +            -> Tensor Build t -- ^ __output__
                                +identity = identity' id
                                +identity' :: forall v'1 t . (TensorType t) => OpParams ->
                                +             Tensor v'1 t -- ^ __input__
                                +             -> Tensor Build t -- ^ __output__
                                +identity' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Identity"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | A Reader that outputs the queued work as both the key and value.
                                +--
                                +-- To use, enqueue strings in a Queue.  ReaderRead will take the front
                                +-- work string and output (work, work).
                                +identityReader :: forall m' . (MonadBuild m') => 
                                +                  m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +identityReader = identityReader' id
                                +identityReader' :: forall m' . (MonadBuild m') => OpParams ->
                                +                   m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +identityReader' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "IdentityReader"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | A Reader that outputs the queued work as both the key and value.
                                +--
                                +-- To use, enqueue strings in a Queue.  ReaderRead will take the front
                                +-- work string and output (work, work).
                                +identityReaderV2 :: forall m' . (MonadBuild m') => 
                                +                    m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +identityReaderV2 = identityReaderV2' id
                                +identityReaderV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                     m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +identityReaderV2' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "IdentityReaderV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | Compute the lower regularized incomplete Gamma function `Q(a, x)`.
                                +--
                                +-- The lower regularized incomplete Gamma function is defined as:
                                +-- 
                                +-- 
                                +-- \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
                                +-- 
                                +-- where
                                +-- 
                                +-- \\(gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt\\)
                                +-- 
                                +-- is the lower incomplete Gamma function.
                                +-- 
                                +-- Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete
                                +-- Gamma function.
                                +igamma :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +          Tensor v'1 t -- ^ __a__
                                +          -> Tensor v'2 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __z__
                                +igamma = igamma' id
                                +igamma' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams ->
                                +           Tensor v'1 t -- ^ __a__
                                +           -> Tensor v'2 t -- ^ __x__
                                +           -> Tensor Build t -- ^ __z__
                                +igamma' op'options a x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a,
                                +                                                             buildInputs x]
                                +        return (opDef "Igamma"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "a" type_attr: "T" }
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Compute the upper regularized incomplete Gamma function `Q(a, x)`.
                                +--
                                +-- The upper regularized incomplete Gamma function is defined as:
                                +-- 
                                +-- \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
                                +-- 
                                +-- where
                                +-- 
                                +-- \\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
                                +-- 
                                +-- is the upper incomplete Gama function.
                                +-- 
                                +-- Note, above `P(a, x)` (`Igamma`) is the lower regularized complete
                                +-- Gamma function.
                                +igammac :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __a__
                                +           -> Tensor v'2 t -- ^ __x__
                                +           -> Tensor Build t -- ^ __z__
                                +igammac = igammac' id
                                +igammac' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams ->
                                +            Tensor v'1 t -- ^ __a__
                                +            -> Tensor v'2 t -- ^ __x__
                                +            -> Tensor Build t -- ^ __z__
                                +igammac' op'options a x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a,
                                +                                                             buildInputs x]
                                +        return (opDef "Igammac"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "a" type_attr: "T" }
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that contains the elements of `input_dataset` ignoring errors.
                                +
                                +ignoreErrorsDataset :: forall v'1 m' . (MonadBuild m') => 
                                +                       [DataType] -- ^ __output_types__
                                +                       -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                       -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +ignoreErrorsDataset = ignoreErrorsDataset' id
                                +ignoreErrorsDataset' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                        [DataType] -- ^ __output_types__
                                +                        -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                        -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +ignoreErrorsDataset' op'options output_types input_dataset | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset]
                                +        buildOp [] (opDef "IgnoreErrorsDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Returns the imaginary part of a complex number.
                                +--
                                +-- Given a tensor `input` of complex numbers, this operation returns a tensor of
                                +-- type `float` that is the imaginary part of each element in `input`. All
                                +-- elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
                                +-- is the real part and *b* is the imaginary part returned by this operation.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
                                +-- tf.imag(input) ==> [4.75, 5.75]
                                +-- ```
                                +imag :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float)] t,
                                +                             OneOf '[Double, Float] tout) => 
                                +        Tensor v'1 t -- ^ __input__
                                +        -> Tensor Build tout -- ^ __output__
                                +imag = imag' id
                                +imag' :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float)] t,
                                +                              OneOf '[Double, Float] tout) => OpParams ->
                                +         Tensor v'1 t -- ^ __input__
                                +         -> Tensor Build tout -- ^ __output__
                                +imag' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Imag"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "Tout" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_COMPLEX64 }
                                +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
                                +}
                                +attr {
                                +  name: "Tout"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Outputs a `Summary` protocol buffer with images.
                                +--
                                +-- The summary has up to `max_images` summary values containing images. The
                                +-- images are built from `tensor` which must be 4-D with shape `[batch_size,
                                +-- height, width, channels]` and where `channels` can be:
                                +-- 
                                +-- *  1: `tensor` is interpreted as Grayscale.
                                +-- *  3: `tensor` is interpreted as RGB.
                                +-- *  4: `tensor` is interpreted as RGBA.
                                +-- 
                                +-- The images have the same number of channels as the input tensor. For float
                                +-- input, the values are normalized one image at a time to fit in the range
                                +-- `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
                                +-- normalization algorithms:
                                +-- 
                                +-- *  If the input values are all positive, they are rescaled so the largest one
                                +--    is 255.
                                +-- 
                                +-- *  If any input value is negative, the values are shifted so input value 0.0
                                +--    is at 127.  They are then rescaled so that either the smallest value is 0,
                                +--    or the largest one is 255.
                                +-- 
                                +-- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
                                +-- build the `tag` of the summary values:
                                +-- 
                                +-- *  If `max_images` is 1, the summary value tag is '*tag*/image'.
                                +-- *  If `max_images` is greater than 1, the summary value tags are
                                +--    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
                                +-- 
                                +-- The `bad_color` argument is the color to use in the generated images for
                                +-- non-finite input values.  It is a `unit8` 1-D tensor of length `channels`.
                                +-- Each element must be in the range `[0, 255]` (It represents the value of a
                                +-- pixel in the output image).  Non-finite values in the input tensor are
                                +-- replaced by this tensor in the output image.  The default value is the color
                                +-- red.
                                +imageSummary :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Data.Word.Word8,
                                +                                            Float] t) => 
                                +                Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
                                +                -> Tensor v'2 t -- ^ __tensor__: 4-D of shape `[batch_size, height, width, channels]` where
                                +                                -- `channels` is 1, 3, or 4.
                                +                -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +imageSummary = imageSummary' id
                                +imageSummary' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Data.Word.Word8,
                                +                                             Float] t) => OpParams ->
                                +                 Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
                                +                 -> Tensor v'2 t -- ^ __tensor__: 4-D of shape `[batch_size, height, width, channels]` where
                                +                                 -- `channels` is 1, 3, or 4.
                                +                 -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +imageSummary' op'options tag tensor | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag,
                                +                                                             buildInputs tensor]
                                +        return (opDef "ImageSummary"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tag"
                                +  description: "Scalar. Used to build the `tag` attribute of the summary values."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor"
                                +  description: "4-D of shape `[batch_size, height, width, channels]` where\n`channels` is 1, 3, or 4."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "summary"
                                +  description: "Scalar. Serialized `Summary` protocol buffer."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "max_images"
                                +  type: "int"
                                +  default_value { i: 3 }
                                +  description: "Max number of batch elements to generate images for."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values {
                                +    list { type: DT_UINT8 type: DT_FLOAT type: DT_HALF }
                                +  }
                                +}
                                +attr {
                                +  name: "bad_color"
                                +  type: "tensor"
                                +  default_value {
                                +    tensor {
                                +      dtype: DT_UINT8
                                +      tensor_shape { dim { size: 4 } }
                                +      int_val: 255
                                +      int_val: 0
                                +      int_val: 0
                                +      int_val: 255
                                +    }
                                +  }
                                +  description: "Color to use for pixels with non-finite values."
                                +}
                                +-}
                                +
                                +-- | Returns immutable tensor from memory region.
                                +--
                                +-- The current implementation memmaps the tensor from a file.
                                +immutableConst :: forall dtype . (TensorType dtype) => 
                                +                  Shape -- ^ __shape__: Shape of the returned tensor.
                                +                  -> Tensor Build dtype -- ^ __tensor__
                                +immutableConst = immutableConst' id
                                +immutableConst' :: forall dtype . (TensorType dtype) => OpParams ->
                                +                   Shape -- ^ __shape__: Shape of the returned tensor.
                                +                   -> Tensor Build dtype -- ^ __tensor__
                                +immutableConst' op'options shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        return (opDef "ImmutableConst"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & opAttr "shape" .~ shape
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "tensor" type_attr: "dtype" }
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "Type of the returned tensor."
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "Shape of the returned tensor."
                                +}
                                +attr {
                                +  name: "memory_region_name"
                                +  type: "string"
                                +  description: "Name of readonly memory region used by the tensor, see\nNewReadOnlyMemoryRegionFromFile in tensorflow::Env."
                                +}
                                +-}
                                +
                                +-- | Says whether the targets are in the top `K` predictions.
                                +--
                                +-- This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
                                +-- prediction for the target class is among the top `k` predictions among
                                +-- all predictions for example `i`. Note that the behavior of `InTopK` differs
                                +-- from the `TopK` op in its handling of ties; if multiple classes have the
                                +-- same prediction value and straddle the top-`k` boundary, all of those
                                +-- classes are considered to be in the top `k`.
                                +-- 
                                +-- More formally, let
                                +-- 
                                +--   \\(predictions_i\\) be the predictions for all classes for example `i`,
                                +--   \\(targets_i\\) be the target class for example `i`,
                                +--   \\(out_i\\) be the output for example `i`,
                                +-- 
                                +-- $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
                                +inTopK :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => 
                                +          Data.Int.Int64 -- ^ __k__: Number of top elements to look at for computing precision.
                                +          -> Tensor v'1 Float -- ^ __predictions__: A `batch_size` x `classes` tensor.
                                +          -> Tensor v'2 t -- ^ __targets__: A `batch_size` vector of class ids.
                                +          -> Tensor Build Bool -- ^ __precision__: Computed Precision at `k` as a `bool Tensor`.
                                +inTopK = inTopK' id
                                +inTopK' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) =>
                                +           OpParams ->
                                +           Data.Int.Int64 -- ^ __k__: Number of top elements to look at for computing precision.
                                +           -> Tensor v'1 Float -- ^ __predictions__: A `batch_size` x `classes` tensor.
                                +           -> Tensor v'2 t -- ^ __targets__: A `batch_size` vector of class ids.
                                +           -> Tensor Build Bool -- ^ __precision__: Computed Precision at `k` as a `bool Tensor`.
                                +inTopK' op'options k predictions targets | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs predictions,
                                +                                                             buildInputs targets]
                                +        return (opDef "InTopK"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "k" .~ k
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "predictions"
                                +  description: "A `batch_size` x `classes` tensor."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "targets"
                                +  description: "A `batch_size` vector of class ids."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "precision"
                                +  description: "Computed Precision at `k` as a `bool Tensor`."
                                +  type: DT_BOOL
                                +}
                                +attr {
                                +  name: "k"
                                +  type: "int"
                                +  description: "Number of top elements to look at for computing precision."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Table initializer that takes two tensors for keys and values respectively.
                                +
                                +initializeTable :: forall v'2 v'3 tkey tval m' . (MonadBuild m',
                                +                                                  TensorType tkey,
                                +                                                  TensorType tval) => 
                                +                   Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized.
                                +                   -> Tensor v'2 tkey -- ^ __keys__: Keys of type Tkey.
                                +                   -> Tensor v'3 tval -- ^ __values__: Values of type Tval.
                                +                   -> m' (ControlNode)
                                +initializeTable = initializeTable' id
                                +initializeTable' :: forall v'2 v'3 tkey tval m' . (MonadBuild m',
                                +                                                   TensorType tkey,
                                +                                                   TensorType tval) =>
                                +                    OpParams ->
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized.
                                +                    -> Tensor v'2 tkey -- ^ __keys__: Keys of type Tkey.
                                +                    -> Tensor v'3 tval -- ^ __values__: Values of type Tval.
                                +                    -> m' (ControlNode)
                                +initializeTable' op'options table_handle keys values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "InitializeTable"
                                +                    & opAttr "Tkey" .~ tensorType (undefined :: tkey)
                                +                    & opAttr "Tval" .~ tensorType (undefined :: tval)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table which will be initialized."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "keys" description: "Keys of type Tkey." type_attr: "Tkey"
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "Values of type Tval."
                                +  type_attr: "Tval"
                                +}
                                +attr { name: "Tkey" type: "type" }
                                +attr { name: "Tval" type: "type" }
                                +-}
                                +
                                +-- | Initializes a table from a text file.
                                +--
                                +-- It inserts one key-value pair into the table for each line of the file.
                                +-- The key and value is extracted from the whole line content, elements from the
                                +-- split line based on `delimiter` or the line number (starting from zero).
                                +-- Where to extract the key and value from a line is specified by `key_index` and
                                +-- `value_index`.
                                +-- 
                                +-- - A value of -1 means use the line number(starting from zero), expects `int64`.
                                +-- - A value of -2 means use the whole line content, expects `string`.
                                +-- - A value >= 0 means use the index (starting at zero) of the split line based
                                +--   on `delimiter`.
                                +initializeTableFromTextFile :: forall v'2 m' . (MonadBuild m') => 
                                +                               Data.Int.Int64 -- ^ __key_index__: Column index in a line to get the table `key` values from.
                                +                               -> Data.Int.Int64 -- ^ __value_index__: Column index that represents information of a line to get the table
                                +                                                 -- `value` values from.
                                +                               -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized.
                                +                               -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__: Filename of a vocabulary text file.
                                +                               -> m' (ControlNode)
                                +initializeTableFromTextFile = initializeTableFromTextFile' id
                                +initializeTableFromTextFile' :: forall v'2 m' . (MonadBuild m') => OpParams ->
                                +                                Data.Int.Int64 -- ^ __key_index__: Column index in a line to get the table `key` values from.
                                +                                -> Data.Int.Int64 -- ^ __value_index__: Column index that represents information of a line to get the table
                                +                                                  -- `value` values from.
                                +                                -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized.
                                +                                -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__: Filename of a vocabulary text file.
                                +                                -> m' (ControlNode)
                                +initializeTableFromTextFile' op'options key_index value_index table_handle
                                +                             filename | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs filename]
                                +        buildOp [] (opDef "InitializeTableFromTextFile"
                                +                    & opAttr "key_index" .~ key_index
                                +                    & opAttr "value_index" .~ value_index
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table which will be initialized."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "filename"
                                +  description: "Filename of a vocabulary text file."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "key_index"
                                +  type: "int"
                                +  description: "Column index in a line to get the table `key` values from."
                                +  has_minimum: true
                                +  minimum: -2
                                +}
                                +attr {
                                +  name: "value_index"
                                +  type: "int"
                                +  description: "Column index that represents information of a line to get the table\n`value` values from."
                                +  has_minimum: true
                                +  minimum: -2
                                +}
                                +attr {
                                +  name: "vocab_size"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Number of elements of the file, use -1 if unknown."
                                +  has_minimum: true
                                +  minimum: -1
                                +}
                                +attr {
                                +  name: "delimiter"
                                +  type: "string"
                                +  default_value { s: "\t" }
                                +  description: "Delimiter to separate fields in a line."
                                +}
                                +-}
                                +
                                +-- | Initializes a table from a text file.
                                +--
                                +-- It inserts one key-value pair into the table for each line of the file.
                                +-- The key and value is extracted from the whole line content, elements from the
                                +-- split line based on `delimiter` or the line number (starting from zero).
                                +-- Where to extract the key and value from a line is specified by `key_index` and
                                +-- `value_index`.
                                +-- 
                                +-- - A value of -1 means use the line number(starting from zero), expects `int64`.
                                +-- - A value of -2 means use the whole line content, expects `string`.
                                +-- - A value >= 0 means use the index (starting at zero) of the split line based
                                +--   on `delimiter`.
                                +initializeTableFromTextFileV2 :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                                 Data.Int.Int64 -- ^ __key_index__: Column index in a line to get the table `key` values from.
                                +                                 -> Data.Int.Int64 -- ^ __value_index__: Column index that represents information of a line to get the table
                                +                                                   -- `value` values from.
                                +                                 -> Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to a table which will be initialized.
                                +                                 -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__: Filename of a vocabulary text file.
                                +                                 -> m' (ControlNode)
                                +initializeTableFromTextFileV2 = initializeTableFromTextFileV2' id
                                +initializeTableFromTextFileV2' :: forall v'1 v'2 m' . (MonadBuild m') =>
                                +                                  OpParams ->
                                +                                  Data.Int.Int64 -- ^ __key_index__: Column index in a line to get the table `key` values from.
                                +                                  -> Data.Int.Int64 -- ^ __value_index__: Column index that represents information of a line to get the table
                                +                                                    -- `value` values from.
                                +                                  -> Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to a table which will be initialized.
                                +                                  -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__: Filename of a vocabulary text file.
                                +                                  -> m' (ControlNode)
                                +initializeTableFromTextFileV2' op'options key_index value_index table_handle
                                +                               filename | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs filename]
                                +        buildOp [] (opDef "InitializeTableFromTextFileV2"
                                +                    & opAttr "key_index" .~ key_index
                                +                    & opAttr "value_index" .~ value_index
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table which will be initialized."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "filename"
                                +  description: "Filename of a vocabulary text file."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "key_index"
                                +  type: "int"
                                +  description: "Column index in a line to get the table `key` values from."
                                +  has_minimum: true
                                +  minimum: -2
                                +}
                                +attr {
                                +  name: "value_index"
                                +  type: "int"
                                +  description: "Column index that represents information of a line to get the table\n`value` values from."
                                +  has_minimum: true
                                +  minimum: -2
                                +}
                                +attr {
                                +  name: "vocab_size"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Number of elements of the file, use -1 if unknown."
                                +  has_minimum: true
                                +  minimum: -1
                                +}
                                +attr {
                                +  name: "delimiter"
                                +  type: "string"
                                +  default_value { s: "\t" }
                                +  description: "Delimiter to separate fields in a line."
                                +}
                                +-}
                                +
                                +-- | Table initializer that takes two tensors for keys and values respectively.
                                +
                                +initializeTableV2 :: forall v'1 v'2 v'3 tkey tval m' . (MonadBuild m',
                                +                                                        TensorType tkey,
                                +                                                        TensorType tval) => 
                                +                     Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to a table which will be initialized.
                                +                     -> Tensor v'2 tkey -- ^ __keys__: Keys of type Tkey.
                                +                     -> Tensor v'3 tval -- ^ __values__: Values of type Tval.
                                +                     -> m' (ControlNode)
                                +initializeTableV2 = initializeTableV2' id
                                +initializeTableV2' :: forall v'1 v'2 v'3 tkey tval m' . (MonadBuild m',
                                +                                                         TensorType tkey,
                                +                                                         TensorType tval) =>
                                +                      OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to a table which will be initialized.
                                +                      -> Tensor v'2 tkey -- ^ __keys__: Keys of type Tkey.
                                +                      -> Tensor v'3 tval -- ^ __values__: Values of type Tval.
                                +                      -> m' (ControlNode)
                                +initializeTableV2' op'options table_handle keys values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "InitializeTableV2"
                                +                    & opAttr "Tkey" .~ tensorType (undefined :: tkey)
                                +                    & opAttr "Tval" .~ tensorType (undefined :: tval)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table which will be initialized."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "keys" description: "Keys of type Tkey." type_attr: "Tkey"
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "Values of type Tval."
                                +  type_attr: "Tval"
                                +}
                                +attr { name: "Tkey" type: "type" }
                                +attr { name: "Tval" type: "type" }
                                +-}
                                +
                                +-- | Computes the reciprocal of x element-wise.
                                +--
                                +-- I.e., \\(y = 1 / x\\).
                                +inv :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Int.Int32,
                                +                               Data.Int.Int64, Data.Word.Word16, Double,
                                +                               Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +inv = inv' id
                                +inv' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                Data.Int.Int64, Data.Word.Word16, Double,
                                +                                Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +inv' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Inv"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient for the inverse of `x` wrt its input.
                                +--
                                +-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
                                +-- is the corresponding input gradient.
                                +invGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Word.Word16, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor v'2 t -- ^ __y__
                                +           -> Tensor Build t -- ^ __z__
                                +invGrad = invGrad' id
                                +invGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Word.Word16, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build t -- ^ __z__
                                +invGrad' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "InvGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Flips all bits elementwise.
                                +--
                                +-- The result will have exactly those bits set, that are not set in `x`. The
                                +-- computation is performed on the underlying representation of x.
                                +invert :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                  Data.Int.Int64, Data.Int.Int8,
                                +                                  Data.Word.Word16, Data.Word.Word8] t) => 
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +invert = invert' id
                                +invert' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Int.Int64, Data.Int.Int8,
                                +                                   Data.Word.Word16, Data.Word.Word8] t) =>
                                +           OpParams ->
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor Build t -- ^ __y__
                                +invert' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Invert"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the inverse permutation of a tensor.
                                +--
                                +-- This operation computes the inverse of an index permutation. It takes a 1-D
                                +-- integer tensor `x`, which represents the indices of a zero-based array, and
                                +-- swaps each value with its index position. In other words, for an output tensor
                                +-- `y` and an input tensor `x`, this operation computes the following:
                                +-- 
                                +-- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
                                +-- 
                                +-- The values must include 0. There can be no duplicate values or negative values.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor `x` is [3, 4, 0, 2, 1]
                                +-- invert_permutation(x) ==> [2, 4, 3, 0, 1]
                                +-- ```
                                +invertPermutation :: forall v'1 t . (OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] t) => 
                                +                     Tensor v'1 t -- ^ __x__: 1-D.
                                +                     -> Tensor Build t -- ^ __y__: 1-D.
                                +invertPermutation = invertPermutation' id
                                +invertPermutation' :: forall v'1 t . (OneOf '[Data.Int.Int32,
                                +                                              Data.Int.Int64] t) => OpParams ->
                                +                      Tensor v'1 t -- ^ __x__: 1-D.
                                +                      -> Tensor Build t -- ^ __y__: 1-D.
                                +invertPermutation' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "InvertPermutation"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" description: "1-D." type_attr: "T" }
                                +output_arg { name: "y" description: "1-D." type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Returns which elements of x are finite.
                                +--
                                +-- @compatibility(numpy)
                                +-- Equivalent to np.isfinite
                                +-- @end_compatibility
                                +isFinite :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor Build Bool -- ^ __y__
                                +isFinite = isFinite' id
                                +isFinite' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +             OpParams ->
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor Build Bool -- ^ __y__
                                +isFinite' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "IsFinite"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns which elements of x are Inf.
                                +--
                                +-- @compatibility(numpy)
                                +-- Equivalent to np.isinf
                                +-- @end_compatibility
                                +isInf :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build Bool -- ^ __y__
                                +isInf = isInf' id
                                +isInf' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build Bool -- ^ __y__
                                +isInf' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "IsInf"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns which elements of x are NaN.
                                +--
                                +-- @compatibility(numpy)
                                +-- Equivalent to np.isnan
                                +-- @end_compatibility
                                +isNan :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build Bool -- ^ __y__
                                +isNan = isNan' id
                                +isNan' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build Bool -- ^ __y__
                                +isNan' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "IsNan"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Checks whether a tensor has been initialized.
                                +--
                                +-- Outputs boolean scalar indicating whether the tensor has been initialized.
                                +isVariableInitialized :: forall dtype m' . (MonadBuild m', TensorType dtype) => 
                                +                         Tensor Ref dtype -- ^ __ref__: Should be from a `Variable` node. May be uninitialized.
                                +                         -> m' (Tensor Value Bool) -- ^ __is_initialized__
                                +isVariableInitialized = isVariableInitialized' id
                                +isVariableInitialized' :: forall dtype m' . (MonadBuild m', TensorType dtype) =>
                                +                          OpParams ->
                                +                          Tensor Ref dtype -- ^ __ref__: Should be from a `Variable` node. May be uninitialized.
                                +                          -> m' (Tensor Value Bool) -- ^ __is_initialized__
                                +isVariableInitialized' op'options ref | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref]
                                +        buildOp [] (opDef "IsVariableInitialized"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node. May be uninitialized."
                                +  type_attr: "dtype"
                                +  is_ref: true
                                +}
                                +output_arg { name: "is_initialized" type: DT_BOOL }
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of elements in the variable tensor."
                                +}
                                +-}
                                +
                                +-- | A container for an iterator resource.
                                +
                                +iterator :: forall m' . (MonadBuild m') => 
                                +            [DataType] -- ^ __output_types__
                                +            -> m' (Tensor Value ResourceHandle) -- ^ __handle__: A handle to the iterator that can be passed to a "MakeIterator"
                                +            -- or "IteratorGetNext" op.
                                +iterator = iterator' id
                                +iterator' :: forall m' . (MonadBuild m') => OpParams ->
                                +             [DataType] -- ^ __output_types__
                                +             -> m' (Tensor Value ResourceHandle) -- ^ __handle__: A handle to the iterator that can be passed to a "MakeIterator"
                                +             -- or "IteratorGetNext" op.
                                +iterator' op'options output_types | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "Iterator"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "A handle to the iterator that can be passed to a \"MakeIterator\"\nor \"IteratorGetNext\" op."
                                +  type: DT_RESOURCE
                                +}
                                +attr { name: "shared_name" type: "string" }
                                +attr { name: "container" type: "string" }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Releases any resources used by the given iterator.
                                +
                                +iteratorDispose :: forall v'1 m' . (MonadBuild m') => 
                                +                   Tensor v'1 ResourceHandle -- ^ __iterator__
                                +                   -> m' (ControlNode)
                                +iteratorDispose = iteratorDispose' id
                                +iteratorDispose' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                    Tensor v'1 ResourceHandle -- ^ __iterator__
                                +                    -> m' (ControlNode)
                                +iteratorDispose' op'options iterator | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs iterator]
                                +        buildOp [] (opDef "IteratorDispose"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "iterator" type: DT_RESOURCE }
                                +-}
                                +
                                +-- | Converts the given string representing a handle to an iterator to a resource.
                                +
                                +iteratorFromStringHandle :: forall v'1 m' . (MonadBuild m') => 
                                +                            Tensor v'1 Data.ByteString.ByteString -- ^ __string_handle__: A string representation of the given handle.
                                +                            -> m' (Tensor Value ResourceHandle) -- ^ __resource_handle__: A handle to an iterator resource.
                                +iteratorFromStringHandle = iteratorFromStringHandle' id
                                +iteratorFromStringHandle' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                             Tensor v'1 Data.ByteString.ByteString -- ^ __string_handle__: A string representation of the given handle.
                                +                             -> m' (Tensor Value ResourceHandle) -- ^ __resource_handle__: A handle to an iterator resource.
                                +iteratorFromStringHandle' op'options string_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_handle]
                                +        buildOp [] (opDef "IteratorFromStringHandle"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "string_handle"
                                +  description: "A string representation of the given handle."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "resource_handle"
                                +  description: "A handle to an iterator resource."
                                +  type: DT_RESOURCE
                                +}
                                +-}
                                +
                                +-- | Gets the next output from the given iterator.
                                +
                                +iteratorGetNext :: forall v'1 output_types m' . (MonadBuild m',
                                +                                                 TensorTypes output_types) => 
                                +                   Tensor v'1 ResourceHandle -- ^ __iterator__
                                +                   -> m' (TensorList (Value) output_types) -- ^ __components__
                                +iteratorGetNext = iteratorGetNext' id
                                +iteratorGetNext' :: forall v'1 output_types m' . (MonadBuild m',
                                +                                                  TensorTypes output_types) =>
                                +                    OpParams ->
                                +                    Tensor v'1 ResourceHandle -- ^ __iterator__
                                +                    -> m' (TensorList (Value) output_types) -- ^ __components__
                                +iteratorGetNext' op'options iterator | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs iterator]
                                +        buildOp [] (opDef "IteratorGetNext"
                                +                    & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "iterator" type: DT_RESOURCE }
                                +output_arg { name: "components" type_list_attr: "output_types" }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Converts the given `resource_handle` representing an iterator to a string.
                                +
                                +iteratorToStringHandle :: forall v'1 m' . (MonadBuild m') => 
                                +                          Tensor v'1 ResourceHandle -- ^ __resource_handle__: A handle to an iterator resource.
                                +                          -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __string_handle__: A string representation of the given handle.
                                +iteratorToStringHandle = iteratorToStringHandle' id
                                +iteratorToStringHandle' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                           Tensor v'1 ResourceHandle -- ^ __resource_handle__: A handle to an iterator resource.
                                +                           -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __string_handle__: A string representation of the given handle.
                                +iteratorToStringHandle' op'options resource_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource_handle]
                                +        buildOp [] (opDef "IteratorToStringHandle"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource_handle"
                                +  description: "A handle to an iterator resource."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg {
                                +  name: "string_handle"
                                +  description: "A string representation of the given handle."
                                +  type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | L2 Loss.
                                +--
                                +-- Computes half the L2 norm of a tensor without the `sqrt`:
                                +-- 
                                +--     output = sum(t ** 2) / 2
                                +l2Loss :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +          Tensor v'1 t -- ^ __t__: Typically 2-D, but may have any dimensions.
                                +          -> Tensor Build t -- ^ __output__: 0-D.
                                +l2Loss = l2Loss' id
                                +l2Loss' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +           OpParams ->
                                +           Tensor v'1 t -- ^ __t__: Typically 2-D, but may have any dimensions.
                                +           -> Tensor Build t -- ^ __output__: 0-D.
                                +l2Loss' op'options t | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t]
                                +        return (opDef "L2Loss"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "t"
                                +  description: "Typically 2-D, but may have any dimensions."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" description: "0-D." type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | A Reader that outputs the records from a LMDB file.
                                +
                                +lMDBReader :: forall m' . (MonadBuild m') => 
                                +              m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +lMDBReader = lMDBReader' id
                                +lMDBReader' :: forall m' . (MonadBuild m') => OpParams ->
                                +               m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +lMDBReader' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "LMDBReader"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | Local Response Normalization.
                                +--
                                +-- The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
                                +-- dimension), and each vector is normalized independently.  Within a given vector,
                                +-- each component is divided by the weighted, squared sum of inputs within
                                +-- `depth_radius`.  In detail,
                                +-- 
                                +--     sqr_sum[a, b, c, d] =
                                +--         sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
                                +--     output = input / (bias + alpha * sqr_sum) ** beta
                                +-- 
                                +-- For details, see [Krizhevsky et al., ImageNet classification with deep
                                +-- convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
                                +lRN :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => 
                                +       Tensor v'1 t -- ^ __input__: 4-D.
                                +       -> Tensor Build t -- ^ __output__
                                +lRN = lRN' id
                                +lRN' :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __input__: 4-D.
                                +        -> Tensor Build t -- ^ __output__
                                +lRN' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "LRN"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" description: "4-D." type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "depth_radius"
                                +  type: "int"
                                +  default_value { i: 5 }
                                +  description: "0-D.  Half-width of the 1-D normalization window."
                                +}
                                +attr {
                                +  name: "bias"
                                +  type: "float"
                                +  default_value { f: 1.0 }
                                +  description: "An offset (usually positive to avoid dividing by 0)."
                                +}
                                +attr {
                                +  name: "alpha"
                                +  type: "float"
                                +  default_value { f: 1.0 }
                                +  description: "A scale factor, usually positive."
                                +}
                                +attr {
                                +  name: "beta"
                                +  type: "float"
                                +  default_value { f: 0.5 }
                                +  description: "An exponent."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
                                +}
                                +-}
                                +
                                +-- | Gradients for Local Response Normalization.
                                +
                                +lRNGrad :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Float] t) => 
                                +           Tensor v'1 t -- ^ __input_grads__: 4-D with shape `[batch, height, width, channels]`.
                                +           -> Tensor v'2 t -- ^ __input_image__: 4-D with shape `[batch, height, width, channels]`.
                                +           -> Tensor v'3 t -- ^ __output_image__: 4-D with shape `[batch, height, width, channels]`.
                                +           -> Tensor Build t -- ^ __output__: The gradients for LRN.
                                +lRNGrad = lRNGrad' id
                                +lRNGrad' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __input_grads__: 4-D with shape `[batch, height, width, channels]`.
                                +            -> Tensor v'2 t -- ^ __input_image__: 4-D with shape `[batch, height, width, channels]`.
                                +            -> Tensor v'3 t -- ^ __output_image__: 4-D with shape `[batch, height, width, channels]`.
                                +            -> Tensor Build t -- ^ __output__: The gradients for LRN.
                                +lRNGrad' op'options input_grads input_image output_image | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_grads,
                                +                                                             buildInputs input_image,
                                +                                                             buildInputs output_image]
                                +        return (opDef "LRNGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_grads"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "input_image"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "output_image"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output" description: "The gradients for LRN." type_attr: "T"
                                +}
                                +attr {
                                +  name: "depth_radius"
                                +  type: "int"
                                +  default_value { i: 5 }
                                +  description: "A depth radius."
                                +}
                                +attr {
                                +  name: "bias"
                                +  type: "float"
                                +  default_value { f: 1.0 }
                                +  description: "An offset (usually > 0 to avoid dividing by 0)."
                                +}
                                +attr {
                                +  name: "alpha"
                                +  type: "float"
                                +  default_value { f: 1.0 }
                                +  description: "A scale factor, usually positive."
                                +}
                                +attr {
                                +  name: "beta"
                                +  type: "float"
                                +  default_value { f: 0.5 }
                                +  description: "An exponent."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
                                +}
                                +-}
                                +
                                +-- | Generates labels for candidate sampling with a learned unigram distribution.
                                +--
                                +-- See explanations of candidate sampling and the data formats at
                                +-- go/candidate-sampling.
                                +-- 
                                +-- For each batch, this op picks a single set of sampled candidate labels.
                                +-- 
                                +-- The advantages of sampling candidates per-batch are simplicity and the
                                +-- possibility of efficient dense matrix multiplication. The disadvantage is that
                                +-- the sampled candidates must be chosen independently of the context and of the
                                +-- true labels.
                                +learnedUnigramCandidateSampler :: forall v'1 m' . (MonadBuild m') => 
                                +                                  Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                                  -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                                  -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                                  -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                          -- candidates in a batch are unique. This requires some approximation to
                                +                                          -- estimate the post-rejection sampling probabilities.
                                +                                  -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                               -- IDs of the num_true target_classes in the corresponding original label.
                                +                                  -> m' ((Tensor Value Data.Int.Int64,
                                +                                          Tensor Value Float,
                                +                                          Tensor Value Float))
                                +                                  -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                                  --
                                +                                  -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                                  -- the ID of a sampled candidate.
                                +                                  --
                                +                                  -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                                  -- the number of times each candidate is expected to occur in a batch
                                +                                  -- of sampled candidates. If unique=true, then this is a probability.
                                +                                  --
                                +                                  -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                                  -- candidate representing the number of times the candidate is expected
                                +                                  -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                                  -- probability.
                                +learnedUnigramCandidateSampler = learnedUnigramCandidateSampler' id
                                +learnedUnigramCandidateSampler' :: forall v'1 m' . (MonadBuild m') =>
                                +                                   OpParams ->
                                +                                   Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                                   -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                                   -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                                   -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                           -- candidates in a batch are unique. This requires some approximation to
                                +                                           -- estimate the post-rejection sampling probabilities.
                                +                                   -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                                -- IDs of the num_true target_classes in the corresponding original label.
                                +                                   -> m' ((Tensor Value Data.Int.Int64,
                                +                                           Tensor Value Float,
                                +                                           Tensor Value Float))
                                +                                   -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                                   --
                                +                                   -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                                   -- the ID of a sampled candidate.
                                +                                   --
                                +                                   -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                                   -- the number of times each candidate is expected to occur in a batch
                                +                                   -- of sampled candidates. If unique=true, then this is a probability.
                                +                                   --
                                +                                   -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                                   -- candidate representing the number of times the candidate is expected
                                +                                   -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                                   -- probability.
                                +learnedUnigramCandidateSampler' op'options num_sampled num_true range_max unique
                                +                                true_classes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes]
                                +        buildOp [] (opDef "LearnedUnigramCandidateSampler"
                                +                    & opAttr "num_sampled" .~ num_sampled
                                +                    & opAttr "num_true" .~ num_true
                                +                    & opAttr "range_max" .~ range_max
                                +                    & opAttr "unique" .~ unique
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "true_classes"
                                +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sampled_candidates"
                                +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "true_expected_count"
                                +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "sampled_expected_count"
                                +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_true"
                                +  type: "int"
                                +  description: "Number of true labels per context."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "num_sampled"
                                +  type: "int"
                                +  description: "Number of candidates to randomly sample."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "unique"
                                +  type: "bool"
                                +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
                                +}
                                +attr {
                                +  name: "range_max"
                                +  type: "int"
                                +  description: "The sampler will sample integers from the interval [0, range_max)."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +-}
                                +
                                +-- | Returns the truth value of (x < y) element-wise.
                                +--
                                +-- *NOTE*: `Less` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +less :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor v'2 t -- ^ __y__
                                +        -> Tensor Build Bool -- ^ __z__
                                +less = less' id
                                +less' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor v'2 t -- ^ __y__
                                +         -> Tensor Build Bool -- ^ __z__
                                +less' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Less"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the truth value of (x <= y) element-wise.
                                +--
                                +-- *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +lessEqual :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => 
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 t -- ^ __y__
                                +             -> Tensor Build Bool -- ^ __z__
                                +lessEqual = lessEqual' id
                                +lessEqual' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __x__
                                +              -> Tensor v'2 t -- ^ __y__
                                +              -> Tensor Build Bool -- ^ __z__
                                +lessEqual' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "LessEqual"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the log of the absolute value of `Gamma(x)` element-wise.
                                +
                                +lgamma :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +lgamma = lgamma' id
                                +lgamma' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +           OpParams ->
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor Build t -- ^ __y__
                                +lgamma' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Lgamma"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Generates values in an interval.
                                +--
                                +-- A sequence of `num` evenly-spaced values are generated beginning at `start`.
                                +-- If `num > 1`, the values in the sequence increase by `stop - start / num - 1`,
                                +-- so that the last one is exactly `stop`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
                                +-- ```
                                +linSpace :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t,
                                +                                         OneOf '[Data.Int.Int32,
                                +                                                 Data.Int.Int64] tidx) => 
                                +            Tensor v'1 t -- ^ __start__: First entry in the range.
                                +            -> Tensor v'2 t -- ^ __stop__: Last entry in the range.
                                +            -> Tensor v'3 tidx -- ^ __num__: Number of values to generate.
                                +            -> Tensor Build t -- ^ __output__: 1-D. The generated values.
                                +linSpace = linSpace' id
                                +linSpace' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t,
                                +                                          OneOf '[Data.Int.Int32,
                                +                                                  Data.Int.Int64] tidx) =>
                                +             OpParams ->
                                +             Tensor v'1 t -- ^ __start__: First entry in the range.
                                +             -> Tensor v'2 t -- ^ __stop__: Last entry in the range.
                                +             -> Tensor v'3 tidx -- ^ __num__: Number of values to generate.
                                +             -> Tensor Build t -- ^ __output__: 1-D. The generated values.
                                +linSpace' op'options start stop num | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs start,
                                +                                                             buildInputs stop,
                                +                                                             buildInputs num]
                                +        return (opDef "LinSpace"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "start"
                                +  description: "First entry in the range."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "stop" description: "Last entry in the range." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "num"
                                +  description: "Number of values to generate."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "1-D. The generated values."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the difference between two lists of numbers or strings.
                                +--
                                +-- Given a list `x` and a list `y`, this operation returns a list `out` that
                                +-- represents all values that are in `x` but not in `y`. The returned list `out`
                                +-- is sorted in the same order that the numbers appear in `x` (duplicates are
                                +-- preserved). This operation also returns a list `idx` that represents the
                                +-- position of each `out` element in `x`. In other words:
                                +-- 
                                +-- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
                                +-- 
                                +-- For example, given this input:
                                +-- 
                                +-- ```
                                +-- x = [1, 2, 3, 4, 5, 6]
                                +-- y = [1, 3, 5]
                                +-- ```
                                +-- 
                                +-- This operation would return:
                                +-- 
                                +-- ```
                                +-- out ==> [2, 4, 6]
                                +-- idx ==> [1, 3, 5]
                                +-- ```
                                +listDiff :: forall v'1 v'2 t out_idx . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] out_idx) =>
                                +            
                                +            Tensor v'1 t -- ^ __x__: 1-D. Values to keep.
                                +            -> Tensor v'2 t -- ^ __y__: 1-D. Values to remove.
                                +            -> (Tensor Build t, Tensor Build out_idx) -- ^ (__out__, __idx__)
                                +            --
                                +            -- * __out__: 1-D. Values present in `x` but not in `y`.
                                +            --
                                +            -- * __idx__: 1-D. Positions of `x` values preserved in `out`.
                                +listDiff = listDiff' id
                                +listDiff' :: forall v'1 v'2 t out_idx . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64] out_idx) =>
                                +             OpParams ->
                                +             Tensor v'1 t -- ^ __x__: 1-D. Values to keep.
                                +             -> Tensor v'2 t -- ^ __y__: 1-D. Values to remove.
                                +             -> (Tensor Build t, Tensor Build out_idx) -- ^ (__out__, __idx__)
                                +             --
                                +             -- * __out__: 1-D. Values present in `x` but not in `y`.
                                +             --
                                +             -- * __idx__: 1-D. Positions of `x` values preserved in `out`.
                                +listDiff' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "ListDiff"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "out_idx" .~ tensorType (undefined :: out_idx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "x" description: "1-D. Values to keep." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "y" description: "1-D. Values to remove." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "1-D. Values present in `x` but not in `y`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "idx"
                                +  description: "1-D. Positions of `x` values preserved in `out`."
                                +  type_attr: "out_idx"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "out_idx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes natural logarithm of x element-wise.
                                +--
                                +-- I.e., \\(y = \log_e x\\).
                                +log :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Word.Word16,
                                +                               Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +log = log' id
                                +log' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                Double, Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +log' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Log"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes natural logarithm of (1 + x) element-wise.
                                +--
                                +-- I.e., \\(y = \log_e (1 + x)\\).
                                +log1p :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +log1p = log1p' id
                                +log1p' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float),
                                +                                  Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +log1p' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Log1p"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes log softmax activations.
                                +--
                                +-- For each batch `i` and class `j` we have
                                +-- 
                                +--     logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
                                +logSoftmax :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +              Tensor v'1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.
                                +              -> Tensor Build t -- ^ __logsoftmax__: Same shape as `logits`.
                                +logSoftmax = logSoftmax' id
                                +logSoftmax' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +               OpParams ->
                                +               Tensor v'1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.
                                +               -> Tensor Build t -- ^ __logsoftmax__: Same shape as `logits`.
                                +logSoftmax' op'options logits | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits]
                                +        return (opDef "LogSoftmax"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "logits"
                                +  description: "2-D with shape `[batch_size, num_classes]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "logsoftmax"
                                +  description: "Same shape as `logits`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Generates labels for candidate sampling with a log-uniform distribution.
                                +--
                                +-- See explanations of candidate sampling and the data formats at
                                +-- go/candidate-sampling.
                                +-- 
                                +-- For each batch, this op picks a single set of sampled candidate labels.
                                +-- 
                                +-- The advantages of sampling candidates per-batch are simplicity and the
                                +-- possibility of efficient dense matrix multiplication. The disadvantage is that
                                +-- the sampled candidates must be chosen independently of the context and of the
                                +-- true labels.
                                +logUniformCandidateSampler :: forall v'1 m' . (MonadBuild m') => 
                                +                              Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                              -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                              -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                              -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                      -- candidates in a batch are unique. This requires some approximation to
                                +                                      -- estimate the post-rejection sampling probabilities.
                                +                              -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                           -- IDs of the num_true target_classes in the corresponding original label.
                                +                              -> m' ((Tensor Value Data.Int.Int64,
                                +                                      Tensor Value Float, Tensor Value Float))
                                +                              -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                              --
                                +                              -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                              -- the ID of a sampled candidate.
                                +                              --
                                +                              -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                              -- the number of times each candidate is expected to occur in a batch
                                +                              -- of sampled candidates. If unique=true, then this is a probability.
                                +                              --
                                +                              -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                              -- candidate representing the number of times the candidate is expected
                                +                              -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                              -- probability.
                                +logUniformCandidateSampler = logUniformCandidateSampler' id
                                +logUniformCandidateSampler' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                               Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                               -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                               -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                               -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                       -- candidates in a batch are unique. This requires some approximation to
                                +                                       -- estimate the post-rejection sampling probabilities.
                                +                               -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                            -- IDs of the num_true target_classes in the corresponding original label.
                                +                               -> m' ((Tensor Value Data.Int.Int64,
                                +                                       Tensor Value Float, Tensor Value Float))
                                +                               -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                               --
                                +                               -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                               -- the ID of a sampled candidate.
                                +                               --
                                +                               -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                               -- the number of times each candidate is expected to occur in a batch
                                +                               -- of sampled candidates. If unique=true, then this is a probability.
                                +                               --
                                +                               -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                               -- candidate representing the number of times the candidate is expected
                                +                               -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                               -- probability.
                                +logUniformCandidateSampler' op'options num_sampled num_true range_max unique
                                +                            true_classes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes]
                                +        buildOp [] (opDef "LogUniformCandidateSampler"
                                +                    & opAttr "num_sampled" .~ num_sampled
                                +                    & opAttr "num_true" .~ num_true
                                +                    & opAttr "range_max" .~ range_max
                                +                    & opAttr "unique" .~ unique
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "true_classes"
                                +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sampled_candidates"
                                +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "true_expected_count"
                                +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "sampled_expected_count"
                                +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_true"
                                +  type: "int"
                                +  description: "Number of true labels per context."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "num_sampled"
                                +  type: "int"
                                +  description: "Number of candidates to randomly sample."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "unique"
                                +  type: "bool"
                                +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
                                +}
                                +attr {
                                +  name: "range_max"
                                +  type: "int"
                                +  description: "The sampler will sample integers from the interval [0, range_max)."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +-}
                                +
                                +-- | Returns the truth value of x AND y element-wise.
                                +--
                                +-- *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +logicalAnd :: 
                                +              Tensor v'1 Bool -- ^ __x__
                                +              -> Tensor v'2 Bool -- ^ __y__
                                +              -> Tensor Build Bool -- ^ __z__
                                +logicalAnd = logicalAnd' id
                                +logicalAnd' :: OpParams ->
                                +               Tensor v'1 Bool -- ^ __x__
                                +               -> Tensor v'2 Bool -- ^ __y__
                                +               -> Tensor Build Bool -- ^ __z__
                                +logicalAnd' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "LogicalAnd"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type: DT_BOOL }
                                +input_arg { name: "y" type: DT_BOOL }
                                +output_arg { name: "z" type: DT_BOOL }
                                +-}
                                +
                                +-- | Returns the truth value of NOT x element-wise.
                                +
                                +logicalNot :: 
                                +              Tensor v'1 Bool -- ^ __x__
                                +              -> Tensor Build Bool -- ^ __y__
                                +logicalNot = logicalNot' id
                                +logicalNot' :: OpParams ->
                                +               Tensor v'1 Bool -- ^ __x__
                                +               -> Tensor Build Bool -- ^ __y__
                                +logicalNot' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "LogicalNot"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type: DT_BOOL }
                                +output_arg { name: "y" type: DT_BOOL }
                                +-}
                                +
                                +-- | Returns the truth value of x OR y element-wise.
                                +--
                                +-- *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +logicalOr :: 
                                +             Tensor v'1 Bool -- ^ __x__
                                +             -> Tensor v'2 Bool -- ^ __y__
                                +             -> Tensor Build Bool -- ^ __z__
                                +logicalOr = logicalOr' id
                                +logicalOr' :: OpParams ->
                                +              Tensor v'1 Bool -- ^ __x__
                                +              -> Tensor v'2 Bool -- ^ __y__
                                +              -> Tensor Build Bool -- ^ __z__
                                +logicalOr' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "LogicalOr"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type: DT_BOOL }
                                +input_arg { name: "y" type: DT_BOOL }
                                +output_arg { name: "z" type: DT_BOOL }
                                +-}
                                +
                                +-- | Outputs all keys and values in the table.
                                +
                                +lookupTableExport :: forall tkeys tvalues m' . (MonadBuild m', TensorType tkeys,
                                +                                                TensorType tvalues) => 
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                     -> m' ((Tensor Value tkeys, Tensor Value tvalues))
                                +                     -- ^ (__keys__, __values__)
                                +                     --
                                +                     -- * __keys__: Vector of all keys present in the table.
                                +                     --
                                +                     -- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`.
                                +lookupTableExport = lookupTableExport' id
                                +lookupTableExport' :: forall tkeys tvalues m' . (MonadBuild m',
                                +                                                 TensorType tkeys,
                                +                                                 TensorType tvalues) =>
                                +                      OpParams ->
                                +                      Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                      -> m' ((Tensor Value tkeys, Tensor Value tvalues))
                                +                      -- ^ (__keys__, __values__)
                                +                      --
                                +                      -- * __keys__: Vector of all keys present in the table.
                                +                      --
                                +                      -- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`.
                                +lookupTableExport' op'options table_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle]
                                +        buildOp [] (opDef "LookupTableExport"
                                +                    & opAttr "Tkeys" .~ tensorType (undefined :: tkeys)
                                +                    & opAttr "Tvalues" .~ tensorType (undefined :: tvalues)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "keys"
                                +  description: "Vector of all keys present in the table."
                                +  type_attr: "Tkeys"
                                +}
                                +output_arg {
                                +  name: "values"
                                +  description: "Tensor of all values in the table. Indexed in parallel with `keys`."
                                +  type_attr: "Tvalues"
                                +}
                                +attr { name: "Tkeys" type: "type" }
                                +attr { name: "Tvalues" type: "type" }
                                +-}
                                +
                                +-- | Outputs all keys and values in the table.
                                +
                                +lookupTableExportV2 :: forall v'1 tkeys tvalues m' . (MonadBuild m',
                                +                                                      TensorType tkeys,
                                +                                                      TensorType tvalues) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                       -> m' ((Tensor Value tkeys, Tensor Value tvalues))
                                +                       -- ^ (__keys__, __values__)
                                +                       --
                                +                       -- * __keys__: Vector of all keys present in the table.
                                +                       --
                                +                       -- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`.
                                +lookupTableExportV2 = lookupTableExportV2' id
                                +lookupTableExportV2' :: forall v'1 tkeys tvalues m' . (MonadBuild m',
                                +                                                       TensorType tkeys,
                                +                                                       TensorType tvalues) =>
                                +                        OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                        -> m' ((Tensor Value tkeys, Tensor Value tvalues))
                                +                        -- ^ (__keys__, __values__)
                                +                        --
                                +                        -- * __keys__: Vector of all keys present in the table.
                                +                        --
                                +                        -- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`.
                                +lookupTableExportV2' op'options table_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle]
                                +        buildOp [] (opDef "LookupTableExportV2"
                                +                    & opAttr "Tkeys" .~ tensorType (undefined :: tkeys)
                                +                    & opAttr "Tvalues" .~ tensorType (undefined :: tvalues)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg {
                                +  name: "keys"
                                +  description: "Vector of all keys present in the table."
                                +  type_attr: "Tkeys"
                                +}
                                +output_arg {
                                +  name: "values"
                                +  description: "Tensor of all values in the table. Indexed in parallel with `keys`."
                                +  type_attr: "Tvalues"
                                +}
                                +attr { name: "Tkeys" type: "type" }
                                +attr { name: "Tvalues" type: "type" }
                                +-}
                                +
                                +-- | Looks up keys in a table, outputs the corresponding values.
                                +--
                                +-- The tensor `keys` must of the same type as the keys of the table.
                                +-- The output `values` is of the type of the table values.
                                +-- 
                                +-- The scalar `default_value` is the value output for keys not present in the
                                +-- table. It must also be of the same type as the table values.
                                +lookupTableFind :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin,
                                +                                                 TensorType tout) => 
                                +                   Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                   -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                   -> Tensor v'3 tout -- ^ __default_value__
                                +                   -> m' (Tensor Value tout) -- ^ __values__: Same shape as `keys`.  Values found in the table, or `default_values`
                                +                   -- for missing keys.
                                +lookupTableFind = lookupTableFind' id
                                +lookupTableFind' :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin,
                                +                                                  TensorType tout) =>
                                +                    OpParams ->
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                    -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                    -> Tensor v'3 tout -- ^ __default_value__
                                +                    -> m' (Tensor Value tout) -- ^ __values__: Same shape as `keys`.  Values found in the table, or `default_values`
                                +                    -- for missing keys.
                                +lookupTableFind' op'options table_handle keys default_value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs default_value]
                                +        buildOp [] (opDef "LookupTableFind"
                                +                    & opAttr "Tin" .~ tensorType (undefined :: tin)
                                +                    & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "keys"
                                +  description: "Any shape.  Keys to look up."
                                +  type_attr: "Tin"
                                +}
                                +input_arg { name: "default_value" type_attr: "Tout" }
                                +output_arg {
                                +  name: "values"
                                +  description: "Same shape as `keys`.  Values found in the table, or `default_values`\nfor missing keys."
                                +  type_attr: "Tout"
                                +}
                                +attr { name: "Tin" type: "type" }
                                +attr { name: "Tout" type: "type" }
                                +-}
                                +
                                +-- | Looks up keys in a table, outputs the corresponding values.
                                +--
                                +-- The tensor `keys` must of the same type as the keys of the table.
                                +-- The output `values` is of the type of the table values.
                                +-- 
                                +-- The scalar `default_value` is the value output for keys not present in the
                                +-- table. It must also be of the same type as the table values.
                                +lookupTableFindV2 :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                       TensorType tin,
                                +                                                       TensorType tout) => 
                                +                     Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                     -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                     -> Tensor v'3 tout -- ^ __default_value__
                                +                     -> m' (Tensor Value tout) -- ^ __values__: Same shape as `keys`.  Values found in the table, or `default_values`
                                +                     -- for missing keys.
                                +lookupTableFindV2 = lookupTableFindV2' id
                                +lookupTableFindV2' :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                        TensorType tin,
                                +                                                        TensorType tout) =>
                                +                      OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                      -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                      -> Tensor v'3 tout -- ^ __default_value__
                                +                      -> m' (Tensor Value tout) -- ^ __values__: Same shape as `keys`.  Values found in the table, or `default_values`
                                +                      -- for missing keys.
                                +lookupTableFindV2' op'options table_handle keys
                                +                   default_value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs default_value]
                                +        buildOp [] (opDef "LookupTableFindV2"
                                +                    & opAttr "Tin" .~ tensorType (undefined :: tin)
                                +                    & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "keys"
                                +  description: "Any shape.  Keys to look up."
                                +  type_attr: "Tin"
                                +}
                                +input_arg { name: "default_value" type_attr: "Tout" }
                                +output_arg {
                                +  name: "values"
                                +  description: "Same shape as `keys`.  Values found in the table, or `default_values`\nfor missing keys."
                                +  type_attr: "Tout"
                                +}
                                +attr { name: "Tin" type: "type" }
                                +attr { name: "Tout" type: "type" }
                                +-}
                                +
                                +-- | Replaces the contents of the table with the specified keys and values.
                                +--
                                +-- The tensor `keys` must be of the same type as the keys of the table.
                                +-- The tensor `values` must be of the type of the table values.
                                +lookupTableImport :: forall v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                   TensorType tin,
                                +                                                   TensorType tout) => 
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                     -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                     -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
                                +                     -> m' (ControlNode)
                                +lookupTableImport = lookupTableImport' id
                                +lookupTableImport' :: forall v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                    TensorType tin,
                                +                                                    TensorType tout) =>
                                +                      OpParams ->
                                +                      Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                      -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                      -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
                                +                      -> m' (ControlNode)
                                +lookupTableImport' op'options table_handle keys values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "LookupTableImport"
                                +                    & opAttr "Tin" .~ tensorType (undefined :: tin)
                                +                    & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "keys"
                                +  description: "Any shape.  Keys to look up."
                                +  type_attr: "Tin"
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "Values to associate with keys."
                                +  type_attr: "Tout"
                                +}
                                +attr { name: "Tin" type: "type" }
                                +attr { name: "Tout" type: "type" }
                                +-}
                                +
                                +-- | Replaces the contents of the table with the specified keys and values.
                                +--
                                +-- The tensor `keys` must be of the same type as the keys of the table.
                                +-- The tensor `values` must be of the type of the table values.
                                +lookupTableImportV2 :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                         TensorType tin,
                                +                                                         TensorType tout) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                       -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                       -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
                                +                       -> m' (ControlNode)
                                +lookupTableImportV2 = lookupTableImportV2' id
                                +lookupTableImportV2' :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                          TensorType tin,
                                +                                                          TensorType tout) =>
                                +                        OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                        -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                        -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
                                +                        -> m' (ControlNode)
                                +lookupTableImportV2' op'options table_handle keys values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "LookupTableImportV2"
                                +                    & opAttr "Tin" .~ tensorType (undefined :: tin)
                                +                    & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "keys"
                                +  description: "Any shape.  Keys to look up."
                                +  type_attr: "Tin"
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "Values to associate with keys."
                                +  type_attr: "Tout"
                                +}
                                +attr { name: "Tin" type: "type" }
                                +attr { name: "Tout" type: "type" }
                                +-}
                                +
                                +-- | Updates the table to associates keys with values.
                                +--
                                +-- The tensor `keys` must be of the same type as the keys of the table.
                                +-- The tensor `values` must be of the type of the table values.
                                +lookupTableInsert :: forall v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                   TensorType tin,
                                +                                                   TensorType tout) => 
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                     -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                     -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
                                +                     -> m' (ControlNode)
                                +lookupTableInsert = lookupTableInsert' id
                                +lookupTableInsert' :: forall v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                    TensorType tin,
                                +                                                    TensorType tout) =>
                                +                      OpParams ->
                                +                      Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                      -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                      -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
                                +                      -> m' (ControlNode)
                                +lookupTableInsert' op'options table_handle keys values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "LookupTableInsert"
                                +                    & opAttr "Tin" .~ tensorType (undefined :: tin)
                                +                    & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "keys"
                                +  description: "Any shape.  Keys to look up."
                                +  type_attr: "Tin"
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "Values to associate with keys."
                                +  type_attr: "Tout"
                                +}
                                +attr { name: "Tin" type: "type" }
                                +attr { name: "Tout" type: "type" }
                                +-}
                                +
                                +-- | Updates the table to associates keys with values.
                                +--
                                +-- The tensor `keys` must be of the same type as the keys of the table.
                                +-- The tensor `values` must be of the type of the table values.
                                +lookupTableInsertV2 :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                         TensorType tin,
                                +                                                         TensorType tout) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                       -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                       -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
                                +                       -> m' (ControlNode)
                                +lookupTableInsertV2 = lookupTableInsertV2' id
                                +lookupTableInsertV2' :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m',
                                +                                                          TensorType tin,
                                +                                                          TensorType tout) =>
                                +                        OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                        -> Tensor v'2 tin -- ^ __keys__: Any shape.  Keys to look up.
                                +                        -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
                                +                        -> m' (ControlNode)
                                +lookupTableInsertV2' op'options table_handle keys values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle,
                                +                                                             buildInputs keys,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "LookupTableInsertV2"
                                +                    & opAttr "Tin" .~ tensorType (undefined :: tin)
                                +                    & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "keys"
                                +  description: "Any shape.  Keys to look up."
                                +  type_attr: "Tin"
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "Values to associate with keys."
                                +  type_attr: "Tout"
                                +}
                                +attr { name: "Tin" type: "type" }
                                +attr { name: "Tout" type: "type" }
                                +-}
                                +
                                +-- | Computes the number of elements in the given table.
                                +
                                +lookupTableSize :: forall m' . (MonadBuild m') => 
                                +                   Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                   -> m' (Tensor Value Data.Int.Int64) -- ^ __size__: Scalar that contains number of elements in the table.
                                +lookupTableSize = lookupTableSize' id
                                +lookupTableSize' :: forall m' . (MonadBuild m') => OpParams ->
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
                                +                    -> m' (Tensor Value Data.Int.Int64) -- ^ __size__: Scalar that contains number of elements in the table.
                                +lookupTableSize' op'options table_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle]
                                +        buildOp [] (opDef "LookupTableSize"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "Scalar that contains number of elements in the table."
                                +  type: DT_INT64
                                +}
                                +-}
                                +
                                +-- | Computes the number of elements in the given table.
                                +
                                +lookupTableSizeV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                     Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                     -> m' (Tensor Value Data.Int.Int64) -- ^ __size__: Scalar that contains number of elements in the table.
                                +lookupTableSizeV2 = lookupTableSizeV2' id
                                +lookupTableSizeV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __table_handle__: Handle to the table.
                                +                      -> m' (Tensor Value Data.Int.Int64) -- ^ __size__: Scalar that contains number of elements in the table.
                                +lookupTableSizeV2' op'options table_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle]
                                +        buildOp [] (opDef "LookupTableSizeV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "table_handle"
                                +  description: "Handle to the table."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "Scalar that contains number of elements in the table."
                                +  type: DT_INT64
                                +}
                                +-}
                                +
                                +-- | Forwards the input to the output.
                                +--
                                +-- This operator represents the loop termination condition used by the
                                +-- "pivot" switches of a loop.
                                +loopCond :: 
                                +            Tensor v'1 Bool -- ^ __input__: A boolean scalar, representing the branch predicate of the Switch op.
                                +            -> Tensor Build Bool -- ^ __output__: The same tensor as `input`.
                                +loopCond = loopCond' id
                                +loopCond' :: OpParams ->
                                +             Tensor v'1 Bool -- ^ __input__: A boolean scalar, representing the branch predicate of the Switch op.
                                +             -> Tensor Build Bool -- ^ __output__: The same tensor as `input`.
                                +loopCond' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "LoopCond"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "A boolean scalar, representing the branch predicate of the Switch op."
                                +  type: DT_BOOL
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same tensor as `input`."
                                +  type: DT_BOOL
                                +}
                                +-}
                                +
                                +-- | Makes a new iterator from the given `dataset` and stores it in `iterator`.
                                +--
                                +-- This operation may be executed multiple times. Each execution will reset the
                                +-- iterator in `iterator` to the first element of `dataset`.
                                +makeIterator :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                Tensor v'1 ResourceHandle -- ^ __dataset__
                                +                -> Tensor v'2 ResourceHandle -- ^ __iterator__
                                +                -> m' (ControlNode)
                                +makeIterator = makeIterator' id
                                +makeIterator' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                 Tensor v'1 ResourceHandle -- ^ __dataset__
                                +                 -> Tensor v'2 ResourceHandle -- ^ __iterator__
                                +                 -> m' (ControlNode)
                                +makeIterator' op'options dataset iterator | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dataset,
                                +                                                             buildInputs iterator]
                                +        buildOp [] (opDef "MakeIterator"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "dataset" type: DT_RESOURCE }
                                +input_arg { name: "iterator" type: DT_RESOURCE }
                                +-}
                                +
                                +-- | Op removes all elements in the underlying container.
                                +
                                +mapClear :: forall m' . (MonadBuild m') => 
                                +            [DataType] -- ^ __dtypes__
                                +            -> m' (ControlNode)
                                +mapClear = mapClear' id
                                +mapClear' :: forall m' . (MonadBuild m') => OpParams ->
                                +             [DataType] -- ^ __dtypes__
                                +             -> m' (ControlNode)
                                +mapClear' op'options dtypes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "MapClear"
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op returns the number of incomplete elements in the underlying container.
                                +
                                +mapIncompleteSize :: forall m' . (MonadBuild m') => 
                                +                     [DataType] -- ^ __dtypes__
                                +                     -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +mapIncompleteSize = mapIncompleteSize' id
                                +mapIncompleteSize' :: forall m' . (MonadBuild m') => OpParams ->
                                +                      [DataType] -- ^ __dtypes__
                                +                      -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +mapIncompleteSize' op'options dtypes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "MapIncompleteSize"
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "size" type: DT_INT32 }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op peeks at the values at the specified key.  If the
                                +--
                                +-- underlying container does not contain this key
                                +-- this op will block until it does.
                                +mapPeek :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => 
                                +           Tensor v'1 Data.Int.Int64 -- ^ __key__
                                +           -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +           -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +mapPeek = mapPeek' id
                                +mapPeek' :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) =>
                                +            OpParams ->
                                +            Tensor v'1 Data.Int.Int64 -- ^ __key__
                                +            -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +            -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +mapPeek' op'options key indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "MapPeek"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "key" type: DT_INT64 }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +output_arg { name: "values" type_list_attr: "dtypes" }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op returns the number of elements in the underlying container.
                                +
                                +mapSize :: forall m' . (MonadBuild m') => 
                                +           [DataType] -- ^ __dtypes__
                                +           -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +mapSize = mapSize' id
                                +mapSize' :: forall m' . (MonadBuild m') => OpParams ->
                                +            [DataType] -- ^ __dtypes__
                                +            -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +mapSize' op'options dtypes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "MapSize"
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "size" type: DT_INT32 }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Stage (key, values) in the underlying container which behaves like a hashtable.
                                +
                                +mapStage :: forall v'1 v'2 v'3 fake_dtypes m' . (MonadBuild m',
                                +                                                 TensorTypes fake_dtypes) => 
                                +            [DataType] -- ^ __dtypes__
                                +            -> Tensor v'1 Data.Int.Int64 -- ^ __key__: int64
                                +            -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +            -> TensorList (v'3) fake_dtypes -- ^ __values__: a list of tensors
                                +                                            -- dtypes A list of data types that inserted values should adhere to.
                                +            -> m' (ControlNode)
                                +mapStage = mapStage' id
                                +mapStage' :: forall v'1 v'2 v'3 fake_dtypes m' . (MonadBuild m',
                                +                                                  TensorTypes fake_dtypes) =>
                                +             OpParams ->
                                +             [DataType] -- ^ __dtypes__
                                +             -> Tensor v'1 Data.Int.Int64 -- ^ __key__: int64
                                +             -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +             -> TensorList (v'3) fake_dtypes -- ^ __values__: a list of tensors
                                +                                             -- dtypes A list of data types that inserted values should adhere to.
                                +             -> m' (ControlNode)
                                +mapStage' op'options dtypes key indices values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "MapStage"
                                +                    & opAttr "fake_dtypes" .~ fromTensorTypes (Proxy :: Proxy fake_dtypes)
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "key" description: "int64" type: DT_INT64 }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +input_arg {
                                +  name: "values"
                                +  description: "a list of tensors\ndtypes A list of data types that inserted values should adhere to."
                                +  type_list_attr: "fake_dtypes"
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr {
                                +  name: "fake_dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "It is necessary to match this name to the matching Unstage Op."
                                +}
                                +-}
                                +
                                +-- | Op removes and returns the values associated with the key
                                +--
                                +-- from the underlying container.   If the underlying container
                                +-- does not contain this key, the op will block until it does.
                                +mapUnstage :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => 
                                +              Tensor v'1 Data.Int.Int64 -- ^ __key__
                                +              -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +              -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +mapUnstage = mapUnstage' id
                                +mapUnstage' :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) =>
                                +               OpParams ->
                                +               Tensor v'1 Data.Int.Int64 -- ^ __key__
                                +               -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +               -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +mapUnstage' op'options key indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "MapUnstage"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "key" type: DT_INT64 }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +output_arg { name: "values" type_list_attr: "dtypes" }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op removes and returns a random (key, value)
                                +--
                                +-- from the underlying container.   If the underlying container
                                +-- does not contain elements, the op will block until it does.
                                +mapUnstageNoKey :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) =>
                                +                   
                                +                   Tensor v'1 Data.Int.Int32 -- ^ __indices__
                                +                   -> m' ((Tensor Value Data.Int.Int64,
                                +                           TensorList (Value) dtypes))
                                +                   -- ^ (__key__, __values__)
                                +                   --
                                +                   -- * __key__
                                +                   --
                                +                   -- * __values__
                                +mapUnstageNoKey = mapUnstageNoKey' id
                                +mapUnstageNoKey' :: forall v'1 dtypes m' . (MonadBuild m',
                                +                                            TensorTypes dtypes) => OpParams ->
                                +                    Tensor v'1 Data.Int.Int32 -- ^ __indices__
                                +                    -> m' ((Tensor Value Data.Int.Int64,
                                +                            TensorList (Value) dtypes))
                                +                    -- ^ (__key__, __values__)
                                +                    --
                                +                    -- * __key__
                                +                    --
                                +                    -- * __values__
                                +mapUnstageNoKey' op'options indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices]
                                +        buildOp [] (opDef "MapUnstageNoKey"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "indices" type: DT_INT32 }
                                +output_arg { name: "key" type: DT_INT64 }
                                +output_arg { name: "values" type_list_attr: "dtypes" }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Multiply the matrix "a" by the matrix "b".
                                +--
                                +-- The inputs must be two-dimensional matrices and the inner dimension of
                                +-- "a" (after being transposed if transpose_a is true) must match the
                                +-- outer dimension of "b" (after being transposed if transposed_b is
                                +-- true).
                                +-- 
                                +-- *Note*: The default kernel implementation for MatMul on GPUs uses
                                +-- cublas.
                                +matMul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float),
                                +                                      Data.Int.Int32, Data.Word.Word16, Double,
                                +                                      Float] t) => 
                                +          Tensor v'1 t -- ^ __a__
                                +          -> Tensor v'2 t -- ^ __b__
                                +          -> Tensor Build t -- ^ __product__
                                +matMul = matMul' id
                                +matMul' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int32, Data.Word.Word16, Double,
                                +                                       Float] t) => OpParams ->
                                +           Tensor v'1 t -- ^ __a__
                                +           -> Tensor v'2 t -- ^ __b__
                                +           -> Tensor Build t -- ^ __product__
                                +matMul' op'options a b | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a,
                                +                                                             buildInputs b]
                                +        return (opDef "MatMul"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "a" type_attr: "T" }
                                +input_arg { name: "b" type_attr: "T" }
                                +output_arg { name: "product" type_attr: "T" }
                                +attr {
                                +  name: "transpose_a"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, \"a\" is transposed before multiplication."
                                +}
                                +attr {
                                +  name: "transpose_b"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, \"b\" is transposed before multiplication."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the set of files matching one or more glob patterns.
                                +--
                                +-- Note that this routine only supports wildcard characters in the
                                +-- basename portion of the pattern, not in the directory portion.
                                +matchingFiles :: 
                                +                 Tensor v'1 Data.ByteString.ByteString -- ^ __pattern__: Shell wildcard pattern(s). Scalar or vector of type string.
                                +                 -> Tensor Build Data.ByteString.ByteString -- ^ __filenames__: A vector of matching filenames.
                                +matchingFiles = matchingFiles' id
                                +matchingFiles' :: OpParams ->
                                +                  Tensor v'1 Data.ByteString.ByteString -- ^ __pattern__: Shell wildcard pattern(s). Scalar or vector of type string.
                                +                  -> Tensor Build Data.ByteString.ByteString -- ^ __filenames__: A vector of matching filenames.
                                +matchingFiles' op'options pattern | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs pattern]
                                +        return (opDef "MatchingFiles"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "pattern"
                                +  description: "Shell wildcard pattern(s). Scalar or vector of type string."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "filenames"
                                +  description: "A vector of matching filenames."
                                +  type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Copy a tensor setting everything outside a central band in each innermost matrix
                                +--
                                +-- to zero.
                                +-- 
                                +-- The `band` part is computed as follows:
                                +-- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
                                +-- tensor with the same shape where
                                +-- 
                                +-- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
                                +-- 
                                +-- The indicator function
                                +-- 
                                +-- `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) &&
                                +--                  (num_upper < 0 || (n-m) <= num_upper)`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # if 'input' is [[ 0,  1,  2, 3]
                                +--                  [-1,  0,  1, 2]
                                +--                  [-2, -1,  0, 1]
                                +--                  [-3, -2, -1, 0]],
                                +-- 
                                +-- tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
                                +--                                        [-1,  0,  1, 2]
                                +--                                        [ 0, -1,  0, 1]
                                +--                                        [ 0,  0, -1, 0]],
                                +-- 
                                +-- tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
                                +--                                       [-1,  0,  1, 0]
                                +--                                       [-2, -1,  0, 1]
                                +--                                       [ 0, -2, -1, 0]]
                                +-- ```
                                +-- 
                                +-- Useful special cases:
                                +-- 
                                +-- ```
                                +--  tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
                                +--  tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
                                +--  tf.matrix_band_part(input, 0, 0) ==> Diagonal.
                                +-- ```
                                +matrixBandPart :: forall v'1 v'2 v'3 t . (TensorType t) => 
                                +                  Tensor v'1 t -- ^ __input__: Rank `k` tensor.
                                +                  -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
                                +                                               -- lower triangle.
                                +                  -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__: 0-D tensor. Number of superdiagonals to keep. If negative, keep
                                +                                               -- entire upper triangle.
                                +                  -> Tensor Build t -- ^ __band__: Rank `k` tensor of the same shape as input. The extracted banded tensor.
                                +matrixBandPart = matrixBandPart' id
                                +matrixBandPart' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams ->
                                +                   Tensor v'1 t -- ^ __input__: Rank `k` tensor.
                                +                   -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
                                +                                                -- lower triangle.
                                +                   -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__: 0-D tensor. Number of superdiagonals to keep. If negative, keep
                                +                                                -- entire upper triangle.
                                +                   -> Tensor Build t -- ^ __band__: Rank `k` tensor of the same shape as input. The extracted banded tensor.
                                +matrixBandPart' op'options input num_lower num_upper | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs num_lower,
                                +                                                             buildInputs num_upper]
                                +        return (opDef "MatrixBandPart"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "Rank `k` tensor." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "num_lower"
                                +  description: "0-D tensor. Number of subdiagonals to keep. If negative, keep entire\nlower triangle."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "num_upper"
                                +  description: "0-D tensor. Number of superdiagonals to keep. If negative, keep\nentire upper triangle."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "band"
                                +  description: "Rank `k` tensor of the same shape as input. The extracted banded tensor."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes the determinant of one ore more square matrices.
                                +--
                                +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
                                +-- form square matrices. The output is a tensor containing the determinants
                                +-- for all input submatrices `[..., :, :]`.
                                +matrixDeterminant :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float),
                                +                                             Double, Float] t) => 
                                +                     Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`.
                                +                     -> Tensor Build t -- ^ __output__: Shape is `[...]`.
                                +matrixDeterminant = matrixDeterminant' id
                                +matrixDeterminant' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                              (Data.Complex.Complex Float),
                                +                                              Double, Float] t) => OpParams ->
                                +                      Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`.
                                +                      -> Tensor Build t -- ^ __output__: Shape is `[...]`.
                                +matrixDeterminant' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "MatrixDeterminant"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "Shape is `[..., M, M]`." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output" description: "Shape is `[...]`." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns a batched diagonal tensor with a given batched diagonal values.
                                +--
                                +-- Given a `diagonal`, this operation returns a tensor with the `diagonal` and
                                +-- everything else padded with zeros. The diagonal is computed as follows:
                                +-- 
                                +-- Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
                                +-- tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:
                                +-- 
                                +-- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
                                +-- 
                                +-- and diagonal.shape = (2, 4)
                                +-- 
                                +-- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
                                +--                                      [0, 2, 0, 0]
                                +--                                      [0, 0, 3, 0]
                                +--                                      [0, 0, 0, 4]],
                                +--                                     [[5, 0, 0, 0]
                                +--                                      [0, 6, 0, 0]
                                +--                                      [0, 0, 7, 0]
                                +--                                      [0, 0, 0, 8]]]
                                +-- 
                                +-- which has shape (2, 4, 4)
                                +-- ```
                                +matrixDiag :: forall v'1 t . (TensorType t) => 
                                +              Tensor v'1 t -- ^ __diagonal__: Rank `k`, where `k >= 1`.
                                +              -> Tensor Build t -- ^ __output__: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
                                +matrixDiag = matrixDiag' id
                                +matrixDiag' :: forall v'1 t . (TensorType t) => OpParams ->
                                +               Tensor v'1 t -- ^ __diagonal__: Rank `k`, where `k >= 1`.
                                +               -> Tensor Build t -- ^ __output__: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
                                +matrixDiag' op'options diagonal | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal]
                                +        return (opDef "MatrixDiag"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "diagonal"
                                +  description: "Rank `k`, where `k >= 1`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Returns the batched diagonal part of a batched tensor.
                                +--
                                +-- This operation returns a tensor with the `diagonal` part
                                +-- of the batched `input`. The `diagonal` part is computed as follows:
                                +-- 
                                +-- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
                                +-- tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
                                +-- 
                                +-- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
                                +-- 
                                +-- The input must be at least a matrix.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'input' is [[[1, 0, 0, 0]
                                +--                [0, 2, 0, 0]
                                +--                [0, 0, 3, 0]
                                +--                [0, 0, 0, 4]],
                                +--               [[5, 0, 0, 0]
                                +--                [0, 6, 0, 0]
                                +--                [0, 0, 7, 0]
                                +--                [0, 0, 0, 8]]]
                                +-- 
                                +-- and input.shape = (2, 4, 4)
                                +-- 
                                +-- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
                                +-- 
                                +-- which has shape (2, 4)
                                +-- ```
                                +matrixDiagPart :: forall v'1 t . (TensorType t) => 
                                +                  Tensor v'1 t -- ^ __input__: Rank `k` tensor where `k >= 2`.
                                +                  -> Tensor Build t -- ^ __diagonal__: The extracted diagonal(s) having shape
                                +                  -- `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
                                +matrixDiagPart = matrixDiagPart' id
                                +matrixDiagPart' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                   Tensor v'1 t -- ^ __input__: Rank `k` tensor where `k >= 2`.
                                +                   -> Tensor Build t -- ^ __diagonal__: The extracted diagonal(s) having shape
                                +                   -- `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
                                +matrixDiagPart' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "MatrixDiagPart"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Rank `k` tensor where `k >= 2`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "diagonal"
                                +  description: "The extracted diagonal(s) having shape\n`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes the inverse of one or more square invertible matrices or their
                                +--
                                +-- adjoints (conjugate transposes).
                                +-- 
                                +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
                                +-- form square matrices. The output is a tensor of the same shape as the input
                                +-- containing the inverse for all input submatrices `[..., :, :]`.
                                +-- 
                                +-- The op uses LU decomposition with partial pivoting to compute the inverses.
                                +-- 
                                +-- If a matrix is not invertible there is no guarantee what the op does. It
                                +-- may detect the condition and raise an exception or it may simply return a
                                +-- garbage result.
                                +matrixInverse :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float), Double,
                                +                                         Float] t) => 
                                +                 Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`.
                                +                 -> Tensor Build t -- ^ __output__: Shape is `[..., M, M]`.
                                +                 -- 
                                +                 -- @compatibility(numpy)
                                +                 -- Equivalent to np.linalg.inv
                                +                 -- @end_compatibility
                                +matrixInverse = matrixInverse' id
                                +matrixInverse' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                          (Data.Complex.Complex Float), Double,
                                +                                          Float] t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`.
                                +                  -> Tensor Build t -- ^ __output__: Shape is `[..., M, M]`.
                                +                  -- 
                                +                  -- @compatibility(numpy)
                                +                  -- Equivalent to np.linalg.inv
                                +                  -- @end_compatibility
                                +matrixInverse' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "MatrixInverse"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "Shape is `[..., M, M]`." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Shape is `[..., M, M]`.\n\n@compatibility(numpy)\nEquivalent to np.linalg.inv\n@end_compatibility"
                                +  type_attr: "T"
                                +}
                                +attr { name: "adjoint" type: "bool" default_value { b: false } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_DOUBLE
                                +      type: DT_FLOAT
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns a batched matrix tensor with new batched diagonal values.
                                +--
                                +-- Given `input` and `diagonal`, this operation returns a tensor with the
                                +-- same shape and values as `input`, except for the main diagonal of the
                                +-- innermost matrices.  These will be overwritten by the values in `diagonal`.
                                +-- 
                                +-- The output is computed as follows:
                                +-- 
                                +-- Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
                                +-- `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
                                +-- tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
                                +-- 
                                +--   * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
                                +--   * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
                                +matrixSetDiag :: forall v'1 v'2 t . (TensorType t) => 
                                +                 Tensor v'1 t -- ^ __input__: Rank `k+1`, where `k >= 1`.
                                +                 -> Tensor v'2 t -- ^ __diagonal__: Rank `k`, where `k >= 1`.
                                +                 -> Tensor Build t -- ^ __output__: Rank `k+1`, with `output.shape = input.shape`.
                                +matrixSetDiag = matrixSetDiag' id
                                +matrixSetDiag' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __input__: Rank `k+1`, where `k >= 1`.
                                +                  -> Tensor v'2 t -- ^ __diagonal__: Rank `k`, where `k >= 1`.
                                +                  -> Tensor Build t -- ^ __output__: Rank `k+1`, with `output.shape = input.shape`.
                                +matrixSetDiag' op'options input diagonal | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs diagonal]
                                +        return (opDef "MatrixSetDiag"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Rank `k+1`, where `k >= 1`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "diagonal"
                                +  description: "Rank `k`, where `k >= 1`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Rank `k+1`, with `output.shape = input.shape`."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Solves systems of linear equations.
                                +--
                                +-- `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
                                +-- form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
                                +-- a tensor shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
                                +-- satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
                                +-- If `adjoint` is `True` then each output matrix satisfies
                                +-- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
                                +matrixSolve :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                           (Data.Complex.Complex Float), Double,
                                +                                           Float] t) => 
                                +               Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, M]`.
                                +               -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`.
                                +               -> Tensor Build t -- ^ __output__: Shape is `[..., M, K]`.
                                +matrixSolve = matrixSolve' id
                                +matrixSolve' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Double, Float] t) => OpParams ->
                                +                Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, M]`.
                                +                -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`.
                                +                -> Tensor Build t -- ^ __output__: Shape is `[..., M, K]`.
                                +matrixSolve' op'options matrix rhs | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix,
                                +                                                             buildInputs rhs]
                                +        return (opDef "MatrixSolve"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "matrix"
                                +  description: "Shape is `[..., M, M]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Shape is `[..., M, K]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "adjoint"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_DOUBLE
                                +      type: DT_FLOAT
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Solves one or more linear least-squares problems.
                                +--
                                +-- `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
                                +-- form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`.
                                +-- The output is a tensor shape `[..., N, K]` where each output matrix solves
                                +-- each of the equations matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]
                                +-- in the least squares sense.
                                +-- 
                                +-- matrix and right-hand sides in the batch:
                                +-- 
                                +-- `matrix`=\\(A \in \Re^{m \times n}\\),
                                +-- `rhs`=\\(B  \in \Re^{m \times k}\\),
                                +-- `output`=\\(X  \in \Re^{n \times k}\\),
                                +-- `l2_regularizer`=\\(\lambda\\).
                                +-- 
                                +-- If `fast` is `True`, then the solution is computed by solving the normal
                                +-- equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
                                +-- \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
                                +-- problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 +
                                +-- \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
                                +-- \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
                                +-- minimum-norm solution to the under-determined linear system, i.e.
                                +-- \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||Z||_F^2 \\), subject to
                                +-- \\(A Z = B\\). Notice that the fast path is only numerically stable when
                                +-- \\(A\\) is numerically full rank and has a condition number
                                +-- \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or\\(\lambda\\) is
                                +-- sufficiently large.
                                +-- 
                                +-- If `fast` is `False` an algorithm based on the numerically robust complete
                                +-- orthogonal decomposition is used. This computes the minimum-norm
                                +-- least-squares solution, even when \\(A\\) is rank deficient. This path is
                                +-- typically 6-7 times slower than the fast path. If `fast` is `False` then
                                +-- `l2_regularizer` is ignored.
                                +matrixSolveLs :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +                 Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, N]`.
                                +                 -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`.
                                +                 -> Tensor v'3 Double -- ^ __l2_regularizer__: Scalar tensor.
                                +                                      -- 
                                +                                      -- @compatibility(numpy)
                                +                                      -- Equivalent to np.linalg.lstsq
                                +                                      -- @end_compatibility
                                +                 -> Tensor Build t -- ^ __output__: Shape is `[..., N, K]`.
                                +matrixSolveLs = matrixSolveLs' id
                                +matrixSolveLs' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) =>
                                +                  OpParams ->
                                +                  Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, N]`.
                                +                  -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`.
                                +                  -> Tensor v'3 Double -- ^ __l2_regularizer__: Scalar tensor.
                                +                                       -- 
                                +                                       -- @compatibility(numpy)
                                +                                       -- Equivalent to np.linalg.lstsq
                                +                                       -- @end_compatibility
                                +                  -> Tensor Build t -- ^ __output__: Shape is `[..., N, K]`.
                                +matrixSolveLs' op'options matrix rhs l2_regularizer | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix,
                                +                                                             buildInputs rhs,
                                +                                                             buildInputs l2_regularizer]
                                +        return (opDef "MatrixSolveLs"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "matrix"
                                +  description: "Shape is `[..., M, N]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2_regularizer"
                                +  description: "Scalar tensor.\n\n@compatibility(numpy)\nEquivalent to np.linalg.lstsq\n@end_compatibility"
                                +  type: DT_DOUBLE
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Shape is `[..., N, K]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +attr { name: "fast" type: "bool" default_value { b: true } }
                                +-}
                                +
                                +-- | Solves systems of linear equations with upper or lower triangular matrices by
                                +--
                                +-- backsubstitution.
                                +-- 
                                +-- `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
                                +-- square matrices. If `lower` is `True` then the strictly upper triangular part
                                +-- of each inner-most matrix is assumed to be zero and not accessed.
                                +-- If `lower` is False then the strictly lower triangular part of each inner-most
                                +-- matrix is assumed to be zero and not accessed.
                                +-- `rhs` is a tensor of shape `[..., M, K]`.
                                +-- 
                                +-- The output is a tensor of shape `[..., M, K]`. If `adjoint` is
                                +-- `True` then the innermost matrices in output` satisfy matrix equations
                                +-- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
                                +-- If `adjoint` is `False` then the strictly then the  innermost matrices in
                                +-- `output` satisfy matrix equations
                                +-- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
                                +matrixTriangularSolve :: forall v'1 v'2
                                +                         t . (OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float), Double,
                                +                                      Float] t) => 
                                +                         Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, M]`.
                                +                         -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`.
                                +                         -> Tensor Build t -- ^ __output__: Shape is `[..., M, K]`.
                                +matrixTriangularSolve = matrixTriangularSolve' id
                                +matrixTriangularSolve' :: forall v'1 v'2
                                +                          t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float), Double,
                                +                                       Float] t) => OpParams ->
                                +                          Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, M]`.
                                +                          -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`.
                                +                          -> Tensor Build t -- ^ __output__: Shape is `[..., M, K]`.
                                +matrixTriangularSolve' op'options matrix rhs | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix,
                                +                                                             buildInputs rhs]
                                +        return (opDef "MatrixTriangularSolve"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "matrix"
                                +  description: "Shape is `[..., M, M]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Shape is `[..., M, K]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "lower"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "Boolean indicating whether the innermost matrices in `matrix` are\nlower or upper triangular."
                                +}
                                +attr {
                                +  name: "adjoint"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\n         adjoint.\n\n@compatibility(numpy)\nEquivalent to np.linalg.triangular_solve\n@end_compatibility"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_DOUBLE
                                +      type: DT_FLOAT
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the maximum of elements across dimensions of a tensor.
                                +--
                                +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
                                +-- retained with length 1.
                                +max :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t,
                                +                                OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
                                +       
                                +       Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +       -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +       -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +max = max' id
                                +max' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t,
                                +                                 OneOf '[Data.Int.Int32,
                                +                                         Data.Int.Int64] tidx) => OpParams ->
                                +        Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +        -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +        -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +max' op'options input reduction_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs reduction_indices]
                                +        return (opDef "Max"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The tensor to reduce." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "reduction_indices"
                                +  description: "The dimensions to reduce."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output" description: "The reduced tensor." type_attr: "T"
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Performs max pooling on the input.
                                +
                                +maxPool :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Int.Int64, Data.Int.Int8,
                                +                                   Data.Word.Word16, Data.Word.Word8, Double,
                                +                                   Float] t) => 
                                +           Tensor v'1 t -- ^ __input__: 4-D input to pool over.
                                +           -> Tensor Build t -- ^ __output__: The max pooled output tensor.
                                +maxPool = maxPool' id
                                +maxPool' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => OpParams ->
                                +            Tensor v'1 t -- ^ __input__: 4-D input to pool over.
                                +            -> Tensor Build t -- ^ __output__: The max pooled output tensor.
                                +maxPool' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "MaxPool"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "4-D input to pool over." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The max pooled output tensor."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the window for each dimension of the input tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +-}
                                +
                                +-- | Performs 3D max pooling on the input.
                                +
                                +maxPool3D :: forall v'1 t . (OneOf '[Float] t) => 
                                +             Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
                                +             -> Tensor Build t -- ^ __output__: The max pooled output tensor.
                                +maxPool3D = maxPool3D' id
                                +maxPool3D' :: forall v'1 t . (OneOf '[Float] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
                                +              -> Tensor Build t -- ^ __output__: The max pooled output tensor.
                                +maxPool3D' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "MaxPool3D"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The max pooled output tensor."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NDHWC" }
                                +  description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
                                +  allowed_values { list { s: "NDHWC" s: "NCDHW" } }
                                +}
                                +attr {
                                +  name: "T" type: "type" allowed_values { list { type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | Computes gradients of max pooling function.
                                +
                                +maxPool3DGrad :: forall v'1 v'2 v'3 t tInput . (OneOf '[Float] t,
                                +                                                OneOf '[Float] tInput) => 
                                +                 Tensor v'1 tInput -- ^ __orig_input__: The original input tensor.
                                +                 -> Tensor v'2 tInput -- ^ __orig_output__: The original output tensor.
                                +                 -> Tensor v'3 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
                                +                 -> Tensor Build t -- ^ __output__
                                +maxPool3DGrad = maxPool3DGrad' id
                                +maxPool3DGrad' :: forall v'1 v'2 v'3 t tInput . (OneOf '[Float] t,
                                +                                                 OneOf '[Float] tInput) =>
                                +                  OpParams ->
                                +                  Tensor v'1 tInput -- ^ __orig_input__: The original input tensor.
                                +                  -> Tensor v'2 tInput -- ^ __orig_output__: The original output tensor.
                                +                  -> Tensor v'3 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
                                +                  -> Tensor Build t -- ^ __output__
                                +maxPool3DGrad' op'options orig_input orig_output grad | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input,
                                +                                                             buildInputs orig_output,
                                +                                                             buildInputs grad]
                                +        return (opDef "MaxPool3DGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "TInput" .~ tensorType (undefined :: tInput)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "orig_input"
                                +  description: "The original input tensor."
                                +  type_attr: "TInput"
                                +}
                                +input_arg {
                                +  name: "orig_output"
                                +  description: "The original output tensor."
                                +  type_attr: "TInput"
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NDHWC" }
                                +  description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
                                +  allowed_values { list { s: "NDHWC" s: "NCDHW" } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT } }
                                +}
                                +attr {
                                +  name: "TInput"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | Computes second-order gradients of the maxpooling function.
                                +
                                +maxPool3DGradGrad :: forall v'1 v'2 v'3 t . (OneOf '[Float] t) => 
                                +                     Tensor v'1 t -- ^ __orig_input__: The original input tensor.
                                +                     -> Tensor v'2 t -- ^ __orig_output__: The original output tensor.
                                +                     -> Tensor v'3 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
                                +                     -> Tensor Build t -- ^ __output__: Gradients of gradients w.r.t. the input to `max_pool`.
                                +maxPool3DGradGrad = maxPool3DGradGrad' id
                                +maxPool3DGradGrad' :: forall v'1 v'2 v'3 t . (OneOf '[Float] t) => OpParams ->
                                +                      Tensor v'1 t -- ^ __orig_input__: The original input tensor.
                                +                      -> Tensor v'2 t -- ^ __orig_output__: The original output tensor.
                                +                      -> Tensor v'3 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
                                +                      -> Tensor Build t -- ^ __output__: Gradients of gradients w.r.t. the input to `max_pool`.
                                +maxPool3DGradGrad' op'options orig_input orig_output grad | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input,
                                +                                                             buildInputs orig_output,
                                +                                                             buildInputs grad]
                                +        return (opDef "MaxPool3DGradGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "orig_input"
                                +  description: "The original input tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "orig_output"
                                +  description: "The original output tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Gradients of gradients w.r.t. the input to `max_pool`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
                                +  has_minimum: true
                                +  minimum: 5
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NDHWC" }
                                +  description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
                                +  allowed_values { list { s: "NDHWC" s: "NCDHW" } }
                                +}
                                +attr {
                                +  name: "T" type: "type" allowed_values { list { type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | Computes gradients of the maxpooling function.
                                +
                                +maxPoolGrad :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Int.Int64, Data.Int.Int8,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8, Double,
                                +                                               Float] t) => 
                                +               Tensor v'1 t -- ^ __orig_input__: The original input tensor.
                                +               -> Tensor v'2 t -- ^ __orig_output__: The original output tensor.
                                +               -> Tensor v'3 t -- ^ __grad__: 4-D.  Gradients w.r.t. the output of `max_pool`.
                                +               -> Tensor Build t -- ^ __output__: Gradients w.r.t. the input to `max_pool`.
                                +maxPoolGrad = maxPoolGrad' id
                                +maxPoolGrad' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                                Data.Int.Int64, Data.Int.Int8,
                                +                                                Data.Word.Word16,
                                +                                                Data.Word.Word8, Double,
                                +                                                Float] t) => OpParams ->
                                +                Tensor v'1 t -- ^ __orig_input__: The original input tensor.
                                +                -> Tensor v'2 t -- ^ __orig_output__: The original output tensor.
                                +                -> Tensor v'3 t -- ^ __grad__: 4-D.  Gradients w.r.t. the output of `max_pool`.
                                +                -> Tensor Build t -- ^ __output__: Gradients w.r.t. the input to `max_pool`.
                                +maxPoolGrad' op'options orig_input orig_output grad | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input,
                                +                                                             buildInputs orig_output,
                                +                                                             buildInputs grad]
                                +        return (opDef "MaxPoolGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "orig_input"
                                +  description: "The original input tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "orig_output"
                                +  description: "The original output tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "4-D.  Gradients w.r.t. the output of `max_pool`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Gradients w.r.t. the input to `max_pool`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the window for each dimension of the input tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes second-order gradients of the maxpooling function.
                                +
                                +maxPoolGradGrad :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                   Data.Int.Int32,
                                +                                                   Data.Int.Int64,
                                +                                                   Data.Int.Int8,
                                +                                                   Data.Word.Word16,
                                +                                                   Data.Word.Word8, Double,
                                +                                                   Float] t) => 
                                +                   Tensor v'1 t -- ^ __orig_input__: The original input tensor.
                                +                   -> Tensor v'2 t -- ^ __orig_output__: The original output tensor.
                                +                   -> Tensor v'3 t -- ^ __grad__: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
                                +                   -> Tensor Build t -- ^ __output__: Gradients of gradients w.r.t. the input to `max_pool`.
                                +maxPoolGradGrad = maxPoolGradGrad' id
                                +maxPoolGradGrad' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Int.Int64,
                                +                                                    Data.Int.Int8,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8, Double,
                                +                                                    Float] t) => OpParams ->
                                +                    Tensor v'1 t -- ^ __orig_input__: The original input tensor.
                                +                    -> Tensor v'2 t -- ^ __orig_output__: The original output tensor.
                                +                    -> Tensor v'3 t -- ^ __grad__: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
                                +                    -> Tensor Build t -- ^ __output__: Gradients of gradients w.r.t. the input to `max_pool`.
                                +maxPoolGradGrad' op'options orig_input orig_output grad | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input,
                                +                                                             buildInputs orig_output,
                                +                                                             buildInputs grad]
                                +        return (opDef "MaxPoolGradGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "orig_input"
                                +  description: "The original input tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "orig_output"
                                +  description: "The original output tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "4-D.  Gradients of gradients w.r.t. the input of `max_pool`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Gradients of gradients w.r.t. the input to `max_pool`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the window for each dimension of the input tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "data_format"
                                +  type: "string"
                                +  default_value { s: "NHWC" }
                                +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
                                +  allowed_values { list { s: "NHWC" s: "NCHW" } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes second-order gradients of the maxpooling function.
                                +
                                +maxPoolGradGradWithArgmax :: forall v'1 v'2 v'3 targmax
                                +                             t . (OneOf '[Data.Int.Int32,
                                +                                          Data.Int.Int64] targmax,
                                +                                  OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t) => 
                                +                             Tensor v'1 t -- ^ __input__: The original input.
                                +                             -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
                                +                                             -- input of `max_pool`.
                                +                             -> Tensor v'3 targmax -- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`.
                                +                             -> Tensor Build t -- ^ __output__: Gradients of gradients w.r.t. the input of `max_pool`.
                                +maxPoolGradGradWithArgmax = maxPoolGradGradWithArgmax' id
                                +maxPoolGradGradWithArgmax' :: forall v'1 v'2 v'3 targmax
                                +                              t . (OneOf '[Data.Int.Int32,
                                +                                           Data.Int.Int64] targmax,
                                +                                   OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16, Data.Word.Word8,
                                +                                           Double, Float] t) => OpParams ->
                                +                              Tensor v'1 t -- ^ __input__: The original input.
                                +                              -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
                                +                                              -- input of `max_pool`.
                                +                              -> Tensor v'3 targmax -- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`.
                                +                              -> Tensor Build t -- ^ __output__: Gradients of gradients w.r.t. the input of `max_pool`.
                                +maxPoolGradGradWithArgmax' op'options input grad argmax | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs argmax]
                                +        return (opDef "MaxPoolGradGradWithArgmax"
                                +                & opAttr "Targmax" .~ tensorType (undefined :: targmax)
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The original input." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the\ninput of `max_pool`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "argmax"
                                +  description: "The indices of the maximum values chosen for each output of `max_pool`."
                                +  type_attr: "Targmax"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Gradients of gradients w.r.t. the input of `max_pool`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the window for each dimension of the input tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "Targmax"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes gradients of the maxpooling function.
                                +
                                +maxPoolGradWithArgmax :: forall v'1 v'2 v'3 targmax t . (OneOf '[Data.Int.Int32,
                                +                                                                 Data.Int.Int64] targmax,
                                +                                                         OneOf '[Data.Int.Int16,
                                +                                                                 Data.Int.Int32,
                                +                                                                 Data.Int.Int64,
                                +                                                                 Data.Int.Int8,
                                +                                                                 Data.Word.Word16,
                                +                                                                 Data.Word.Word8,
                                +                                                                 Double,
                                +                                                                 Float] t) => 
                                +                         Tensor v'1 t -- ^ __input__: The original input.
                                +                         -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
                                +                                         -- output of `max_pool`.
                                +                         -> Tensor v'3 targmax -- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`.
                                +                         -> Tensor Build t -- ^ __output__: Gradients w.r.t. the input of `max_pool`.
                                +maxPoolGradWithArgmax = maxPoolGradWithArgmax' id
                                +maxPoolGradWithArgmax' :: forall v'1 v'2 v'3 targmax
                                +                          t . (OneOf '[Data.Int.Int32, Data.Int.Int64] targmax,
                                +                               OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => OpParams ->
                                +                          Tensor v'1 t -- ^ __input__: The original input.
                                +                          -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
                                +                                          -- output of `max_pool`.
                                +                          -> Tensor v'3 targmax -- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`.
                                +                          -> Tensor Build t -- ^ __output__: Gradients w.r.t. the input of `max_pool`.
                                +maxPoolGradWithArgmax' op'options input grad argmax | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs argmax]
                                +        return (opDef "MaxPoolGradWithArgmax"
                                +                & opAttr "Targmax" .~ tensorType (undefined :: targmax)
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The original input." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad"
                                +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the\noutput of `max_pool`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "argmax"
                                +  description: "The indices of the maximum values chosen for each output of `max_pool`."
                                +  type_attr: "Targmax"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Gradients w.r.t. the input of `max_pool`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the window for each dimension of the input tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "Targmax"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Performs max pooling on the input and outputs both max values and indices.
                                +--
                                +-- The indices in `argmax` are flattened, so that a maximum value at position
                                +-- `[b, y, x, c]` becomes flattened index
                                +-- `((b * height + y) * width + x) * channels + c`.
                                +-- 
                                +-- The indices returned are always in `[0, height) x [0, width)` before flattening,
                                +-- even if padding is involved and the mathematically correct answer is outside
                                +-- (either negative or too large).  This is a bug, but fixing it is difficult to do
                                +-- in a safe backwards compatible way, especially due to flattening.
                                +maxPoolWithArgmax :: forall v'1 targmax t . (OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] targmax,
                                +                                             OneOf '[Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t) => 
                                +                     Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
                                +                     -> (Tensor Build t, Tensor Build targmax)
                                +                     -- ^ (__output__, __argmax__)
                                +                     --
                                +                     -- * __output__: The max pooled output tensor.
                                +                     --
                                +                     -- * __argmax__: 4-D.  The flattened indices of the max values chosen for each output.
                                +maxPoolWithArgmax = maxPoolWithArgmax' id
                                +maxPoolWithArgmax' :: forall v'1 targmax t . (OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] targmax,
                                +                                              OneOf '[Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t) => OpParams ->
                                +                      Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
                                +                      -> (Tensor Build t, Tensor Build targmax)
                                +                      -- ^ (__output__, __argmax__)
                                +                      --
                                +                      -- * __output__: The max pooled output tensor.
                                +                      --
                                +                      -- * __argmax__: 4-D.  The flattened indices of the max values chosen for each output.
                                +maxPoolWithArgmax' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "MaxPoolWithArgmax"
                                +                & opAttr "Targmax" .~ tensorType (undefined :: targmax)
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, height, width, channels]`.  Input to pool over."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The max pooled output tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "argmax"
                                +  description: "4-D.  The flattened indices of the max values chosen for each output."
                                +  type_attr: "Targmax"
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the window for each dimension of the input tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
                                +  has_minimum: true
                                +  minimum: 4
                                +}
                                +attr {
                                +  name: "Targmax"
                                +  type: "type"
                                +  default_value { type: DT_INT64 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the max of x and y (i.e. x > y ? x : y) element-wise.
                                +--
                                +-- *NOTE*: `Maximum` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +maximum :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                       Data.Word.Word16, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor v'2 t -- ^ __y__
                                +           -> Tensor Build t -- ^ __z__
                                +maximum = maximum' id
                                +maximum' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                        Data.Word.Word16, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build t -- ^ __z__
                                +maximum' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Maximum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the mean of elements across dimensions of a tensor.
                                +--
                                +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
                                +-- retained with length 1.
                                +mean :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t,
                                +                                 OneOf '[Data.Int.Int32,
                                +                                         Data.Int.Int64] tidx) => 
                                +        Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +        -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +        -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +mean = mean' id
                                +mean' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                          (Data.Complex.Complex Float),
                                +                                          Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t,
                                +                                  OneOf '[Data.Int.Int32,
                                +                                          Data.Int.Int64] tidx) => OpParams ->
                                +         Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +         -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +         -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +mean' op'options input reduction_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs reduction_indices]
                                +        return (opDef "Mean"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The tensor to reduce." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "reduction_indices"
                                +  description: "The dimensions to reduce."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output" description: "The reduced tensor." type_attr: "T"
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Forwards the value of an available tensor from `inputs` to `output`.
                                +--
                                +-- `Merge` waits for at least one of the tensors in `inputs` to become available.
                                +-- It is usually combined with `Switch` to implement branching.
                                +-- 
                                +-- `Merge` forwards the first tensor to become available to `output`, and sets
                                +-- `value_index` to its index in `inputs`.
                                +merge :: forall v'1 t . (TensorType t) => 
                                +         [Tensor v'1 t] -- ^ __inputs__: The input tensors, exactly one of which will become available.
                                +         -> (Tensor Build t, Tensor Build Data.Int.Int32)
                                +         -- ^ (__output__, __value_index__)
                                +         --
                                +         -- * __output__: Will be set to the available input tensor.
                                +         --
                                +         -- * __value_index__: The index of the chosen input tensor in `inputs`.
                                +merge = merge' id
                                +merge' :: forall v'1 t . (TensorType t) => OpParams ->
                                +          [Tensor v'1 t] -- ^ __inputs__: The input tensors, exactly one of which will become available.
                                +          -> (Tensor Build t, Tensor Build Data.Int.Int32)
                                +          -- ^ (__output__, __value_index__)
                                +          --
                                +          -- * __output__: Will be set to the available input tensor.
                                +          --
                                +          -- * __value_index__: The index of the chosen input tensor in `inputs`.
                                +merge' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
                                +        return (opDef "Merge"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length inputs) :: Int64
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "The input tensors, exactly one of which will become available."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Will be set to the available input tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "value_index"
                                +  description: "The index of the chosen input tensor in `inputs`."
                                +  type: DT_INT32
                                +}
                                +attr { name: "T" type: "type" }
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | Merges summaries.
                                +--
                                +-- This op creates a
                                +-- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
                                +-- protocol buffer that contains the union of all the values in the input
                                +-- summaries.
                                +-- 
                                +-- When the Op is run, it reports an `InvalidArgument` error if multiple values
                                +-- in the summaries to merge use the same tag.
                                +mergeSummary :: 
                                +                [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__: Can be of any shape.  Each must contain serialized `Summary` protocol
                                +                                                        -- buffers.
                                +                -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +mergeSummary = mergeSummary' id
                                +mergeSummary' :: OpParams ->
                                +                 [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__: Can be of any shape.  Each must contain serialized `Summary` protocol
                                +                                                         -- buffers.
                                +                 -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
                                +mergeSummary' op'options
                                +              inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
                                +        return (opDef "MergeSummary"
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length inputs) :: Int64
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "Can be of any shape.  Each must contain serialized `Summary` protocol\nbuffers."
                                +  type: DT_STRING
                                +  number_attr: "N"
                                +}
                                +output_arg {
                                +  name: "summary"
                                +  description: "Scalar. Serialized `Summary` protocol buffer."
                                +  type: DT_STRING
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | V2 format specific: merges the metadata files of sharded checkpoints.  The
                                +--
                                +-- result is one logical checkpoint, with one physical metadata file and renamed
                                +-- data files.
                                +-- 
                                +-- Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
                                +-- 
                                +-- If delete_old_dirs is true, attempts to delete recursively the dirname of each
                                +-- path in the input checkpoint_prefixes.  This is useful when those paths are non
                                +-- user-facing temporary locations.
                                +mergeV2Checkpoints :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                      Tensor v'1 Data.ByteString.ByteString -- ^ __checkpoint_prefixes__: prefixes of V2 checkpoints to merge.
                                +                      -> Tensor v'2 Data.ByteString.ByteString -- ^ __destination_prefix__: scalar.  The desired final prefix.  Allowed to be the same
                                +                                                               -- as one of the checkpoint_prefixes.
                                +                      -> m' (ControlNode)
                                +mergeV2Checkpoints = mergeV2Checkpoints' id
                                +mergeV2Checkpoints' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                       Tensor v'1 Data.ByteString.ByteString -- ^ __checkpoint_prefixes__: prefixes of V2 checkpoints to merge.
                                +                       -> Tensor v'2 Data.ByteString.ByteString -- ^ __destination_prefix__: scalar.  The desired final prefix.  Allowed to be the same
                                +                                                                -- as one of the checkpoint_prefixes.
                                +                       -> m' (ControlNode)
                                +mergeV2Checkpoints' op'options checkpoint_prefixes
                                +                    destination_prefix | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs checkpoint_prefixes,
                                +                                                             buildInputs destination_prefix]
                                +        buildOp [] (opDef "MergeV2Checkpoints"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "checkpoint_prefixes"
                                +  description: "prefixes of V2 checkpoints to merge."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "destination_prefix"
                                +  description: "scalar.  The desired final prefix.  Allowed to be the same\nas one of the checkpoint_prefixes."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "delete_old_dirs"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "see above."
                                +}
                                +-}
                                +
                                +-- | Transforms a spectrogram into a form that's useful for speech recognition.
                                +--
                                +-- Mel Frequency Cepstral Coefficients are a way of representing audio data that's
                                +-- been effective as an input feature for machine learning. They are created by
                                +-- taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
                                +-- higher frequencies that are less significant to the human ear. They have a long
                                +-- history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
                                +-- is a good resource to learn more.
                                +mfcc :: 
                                +        Tensor v'1 Float -- ^ __spectrogram__: Typically produced by the Spectrogram op, with magnitude_squared
                                +                         -- set to true.
                                +        -> Tensor v'2 Data.Int.Int32 -- ^ __sample_rate__: How many samples per second the source audio used.
                                +        -> Tensor Build Float -- ^ __output__
                                +mfcc = mfcc' id
                                +mfcc' :: OpParams ->
                                +         Tensor v'1 Float -- ^ __spectrogram__: Typically produced by the Spectrogram op, with magnitude_squared
                                +                          -- set to true.
                                +         -> Tensor v'2 Data.Int.Int32 -- ^ __sample_rate__: How many samples per second the source audio used.
                                +         -> Tensor Build Float -- ^ __output__
                                +mfcc' op'options spectrogram sample_rate | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs spectrogram,
                                +                                                             buildInputs sample_rate]
                                +        return (opDef "Mfcc"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "spectrogram"
                                +  description: "Typically produced by the Spectrogram op, with magnitude_squared\nset to true."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "sample_rate"
                                +  description: "How many samples per second the source audio used."
                                +  type: DT_INT32
                                +}
                                +output_arg { name: "output" type: DT_FLOAT }
                                +attr {
                                +  name: "upper_frequency_limit"
                                +  type: "float"
                                +  default_value { f: 4000.0 }
                                +  description: "The highest frequency to use when calculating the\nceptstrum."
                                +}
                                +attr {
                                +  name: "lower_frequency_limit"
                                +  type: "float"
                                +  default_value { f: 20.0 }
                                +  description: "The lowest frequency to use when calculating the\nceptstrum."
                                +}
                                +attr {
                                +  name: "filterbank_channel_count"
                                +  type: "int"
                                +  default_value { i: 40 }
                                +  description: "Resolution of the Mel bank used internally."
                                +}
                                +attr {
                                +  name: "dct_coefficient_count"
                                +  type: "int"
                                +  default_value { i: 13 }
                                +  description: "How many output channels to produce per time slice."
                                +}
                                +-}
                                +
                                +-- | Computes the minimum of elements across dimensions of a tensor.
                                +--
                                +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
                                +-- retained with length 1.
                                +min :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t,
                                +                                OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
                                +       
                                +       Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +       -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +       -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +min = min' id
                                +min' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t,
                                +                                 OneOf '[Data.Int.Int32,
                                +                                         Data.Int.Int64] tidx) => OpParams ->
                                +        Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +        -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +        -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +min' op'options input reduction_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs reduction_indices]
                                +        return (opDef "Min"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The tensor to reduce." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "reduction_indices"
                                +  description: "The dimensions to reduce."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output" description: "The reduced tensor." type_attr: "T"
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Returns the min of x and y (i.e. x < y ? x : y) element-wise.
                                +--
                                +-- *NOTE*: `Minimum` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +minimum :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                       Data.Word.Word16, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor v'2 t -- ^ __y__
                                +           -> Tensor Build t -- ^ __z__
                                +minimum = minimum' id
                                +minimum' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                        Data.Word.Word16, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build t -- ^ __z__
                                +minimum' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Minimum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Pads a tensor with mirrored values.
                                +--
                                +-- This operation pads a `input` with mirrored values according to the `paddings`
                                +-- you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
                                +-- the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
                                +-- how many values to add before the contents of `input` in that dimension, and
                                +-- `paddings[D, 1]` indicates how many values to add after the contents of `input`
                                +-- in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
                                +-- than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
                                +-- (if false, respectively).
                                +-- 
                                +-- The padded size of each dimension D of the output is:
                                +-- 
                                +-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 't' is [[1, 2, 3], [4, 5, 6]].
                                +-- # 'paddings' is [[1, 1]], [2, 2]].
                                +-- # 'mode' is SYMMETRIC.
                                +-- # rank of 't' is 2.
                                +-- pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
                                +--                       [2, 1, 1, 2, 3, 3, 2]
                                +--                       [5, 4, 4, 5, 6, 6, 5]
                                +--                       [5, 4, 4, 5, 6, 6, 5]]
                                +-- ```
                                +mirrorPad :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                                 Data.Int.Int64] tpaddings) =>
                                +             
                                +             Tensor v'1 t -- ^ __input__: The input tensor to be padded.
                                +             -> Tensor v'2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
                                +                                     -- rows must be the same as the rank of `input`.
                                +             -> Tensor Build t -- ^ __output__: The padded tensor.
                                +mirrorPad = mirrorPad' id
                                +mirrorPad' :: forall v'1 v'2 t tpaddings . (TensorType t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tpaddings) =>
                                +              OpParams ->
                                +              Tensor v'1 t -- ^ __input__: The input tensor to be padded.
                                +              -> Tensor v'2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
                                +                                      -- rows must be the same as the rank of `input`.
                                +              -> Tensor Build t -- ^ __output__: The padded tensor.
                                +mirrorPad' op'options input paddings | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs paddings]
                                +        return (opDef "MirrorPad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "The input tensor to be padded."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "paddings"
                                +  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
                                +  type_attr: "Tpaddings"
                                +}
                                +output_arg {
                                +  name: "output" description: "The padded tensor." type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tpaddings"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "mode"
                                +  type: "string"
                                +  description: "Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\ndo not include the borders, while in symmetric mode the padded regions\ndo include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`\nis `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\nit is `[1, 2, 3, 3, 2]` in symmetric mode."
                                +  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
                                +}
                                +-}
                                +
                                +-- | Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
                                +--
                                +-- This operation folds the padded areas of `input` by `MirrorPad` according to the
                                +-- `paddings` you specify. `paddings` must be the same as `paddings` argument
                                +-- given to the corresponding `MirrorPad` op.
                                +-- 
                                +-- The folded size of each dimension D of the output is:
                                +-- 
                                +-- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
                                +-- # 'paddings' is [[0, 1]], [0, 1]].
                                +-- # 'mode' is SYMMETRIC.
                                +-- # rank of 't' is 2.
                                +-- pad(t, paddings) ==> [[ 1,  5]
                                +--                       [11, 28]]
                                +-- ```
                                +mirrorPadGrad :: forall v'1 v'2 t tpaddings . (TensorType t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tpaddings) =>
                                +                 
                                +                 Tensor v'1 t -- ^ __input__: The input tensor to be folded.
                                +                 -> Tensor v'2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
                                +                                         -- rows must be the same as the rank of `input`.
                                +                 -> Tensor Build t -- ^ __output__: The folded tensor.
                                +mirrorPadGrad = mirrorPadGrad' id
                                +mirrorPadGrad' :: forall v'1 v'2 t tpaddings . (TensorType t,
                                +                                                OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] tpaddings) =>
                                +                  OpParams ->
                                +                  Tensor v'1 t -- ^ __input__: The input tensor to be folded.
                                +                  -> Tensor v'2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
                                +                                          -- rows must be the same as the rank of `input`.
                                +                  -> Tensor Build t -- ^ __output__: The folded tensor.
                                +mirrorPadGrad' op'options input paddings | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs paddings]
                                +        return (opDef "MirrorPadGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "The input tensor to be folded."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "paddings"
                                +  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
                                +  type_attr: "Tpaddings"
                                +}
                                +output_arg {
                                +  name: "output" description: "The folded tensor." type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tpaddings"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "mode"
                                +  type: "string"
                                +  description: "The mode used in the `MirrorPad` op."
                                +  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
                                +}
                                +-}
                                +
                                +-- | Returns element-wise remainder of division. This emulates C semantics in that
                                +--
                                +-- the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
                                +-- y + truncate_mod(x, y) = x`.
                                +-- 
                                +-- *NOTE*: `Mod` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +mod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double,
                                +                                   Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor v'2 t -- ^ __y__
                                +       -> Tensor Build t -- ^ __z__
                                +mod = mod' id
                                +mod' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double,
                                +                                    Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor v'2 t -- ^ __y__
                                +        -> Tensor Build t -- ^ __z__
                                +mod' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Mod"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns x * y element-wise.
                                +--
                                +-- *NOTE*: `Mul` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +mul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                   Data.Int.Int32, Data.Int.Int64,
                                +                                   Data.Int.Int8, Data.Word.Word16,
                                +                                   Data.Word.Word8, Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor v'2 t -- ^ __y__
                                +       -> Tensor Build t -- ^ __z__
                                +mul = mul' id
                                +mul' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor v'2 t -- ^ __y__
                                +        -> Tensor Build t -- ^ __z__
                                +mul' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Mul"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Draws samples from a multinomial distribution.
                                +
                                +multinomial :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16,
                                +                                                             Data.Int.Int32,
                                +                                                             Data.Int.Int64,
                                +                                                             Data.Int.Int8,
                                +                                                             Data.Word.Word16,
                                +                                                             Data.Word.Word8,
                                +                                                             Double,
                                +                                                             Float] t) => 
                                +               Tensor v'1 t -- ^ __logits__: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
                                +                            -- represents the unnormalized log probabilities for all classes.
                                +               -> Tensor v'2 Data.Int.Int32 -- ^ __num_samples__: 0-D.  Number of independent samples to draw for each row slice.
                                +               -> m' (Tensor Value Data.Int.Int64) -- ^ __output__: 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
                                +               -- contains the drawn class labels with range `[0, num_classes)`.
                                +multinomial = multinomial' id
                                +multinomial' :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16,
                                +                                                              Data.Int.Int32,
                                +                                                              Data.Int.Int64,
                                +                                                              Data.Int.Int8,
                                +                                                              Data.Word.Word16,
                                +                                                              Data.Word.Word8,
                                +                                                              Double,
                                +                                                              Float] t) =>
                                +                OpParams ->
                                +                Tensor v'1 t -- ^ __logits__: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
                                +                             -- represents the unnormalized log probabilities for all classes.
                                +                -> Tensor v'2 Data.Int.Int32 -- ^ __num_samples__: 0-D.  Number of independent samples to draw for each row slice.
                                +                -> m' (Tensor Value Data.Int.Int64) -- ^ __output__: 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
                                +                -- contains the drawn class labels with range `[0, num_classes)`.
                                +multinomial' op'options logits num_samples | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits,
                                +                                                             buildInputs num_samples]
                                +        buildOp [] (opDef "Multinomial"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "logits"
                                +  description: "2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "num_samples"
                                +  description: "0-D.  Number of independent samples to draw for each row slice."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 is set to be non-zero, the internal random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Creates an empty hash table that uses tensors as the backing store.
                                +--
                                +-- It uses "open addressing" with quadratic reprobing to resolve
                                +-- collisions.
                                +-- 
                                +-- This op creates a mutable hash table, specifying the type of its keys and
                                +-- values. Each value must be a scalar. Data can be inserted into the table using
                                +-- the insert operations. It does not support the initialization operation.
                                +mutableDenseHashTable :: forall v'1 key_dtype m' . (MonadBuild m',
                                +                                                    TensorType key_dtype) => 
                                +                         DataType -- ^ __value_dtype__: Type of the table values.
                                +                         -> Tensor v'1 key_dtype -- ^ __empty_key__: The key used to represent empty key buckets internally. Must not
                                +                                                 -- be used in insert or lookup operations.
                                +                         -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table.
                                +mutableDenseHashTable = mutableDenseHashTable' id
                                +mutableDenseHashTable' :: forall v'1 key_dtype m' . (MonadBuild m',
                                +                                                     TensorType key_dtype) =>
                                +                          OpParams ->
                                +                          DataType -- ^ __value_dtype__: Type of the table values.
                                +                          -> Tensor v'1 key_dtype -- ^ __empty_key__: The key used to represent empty key buckets internally. Must not
                                +                                                  -- be used in insert or lookup operations.
                                +                          -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table.
                                +mutableDenseHashTable' op'options value_dtype empty_key | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs empty_key]
                                +        buildOp [] (opDef "MutableDenseHashTable"
                                +                    & opAttr "key_dtype" .~ tensorType (undefined :: key_dtype)
                                +                    & opAttr "value_dtype" .~ value_dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "empty_key"
                                +  description: "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations."
                                +  type_attr: "key_dtype"
                                +}
                                +output_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is shared under the given name across\nmultiple sessions."
                                +}
                                +attr {
                                +  name: "use_node_name_sharing"
                                +  type: "bool"
                                +  default_value { b: false }
                                +}
                                +attr {
                                +  name: "key_dtype"
                                +  type: "type"
                                +  description: "Type of the table keys."
                                +}
                                +attr {
                                +  name: "value_dtype"
                                +  type: "type"
                                +  description: "Type of the table values."
                                +}
                                +attr {
                                +  name: "value_shape"
                                +  type: "shape"
                                +  default_value { shape { } }
                                +  description: "The shape of each value."
                                +}
                                +attr {
                                +  name: "initial_num_buckets"
                                +  type: "int"
                                +  default_value { i: 131072 }
                                +  description: "The initial number of hash table buckets. Must be a power\nto 2."
                                +}
                                +attr {
                                +  name: "max_load_factor"
                                +  type: "float"
                                +  default_value { f: 0.8 }
                                +  description: "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1."
                                +}
                                +-}
                                +
                                +-- | Creates an empty hash table that uses tensors as the backing store.
                                +--
                                +-- It uses "open addressing" with quadratic reprobing to resolve
                                +-- collisions.
                                +-- 
                                +-- This op creates a mutable hash table, specifying the type of its keys and
                                +-- values. Each value must be a scalar. Data can be inserted into the table using
                                +-- the insert operations. It does not support the initialization operation.
                                +mutableDenseHashTableV2 :: forall v'1 key_dtype m' . (MonadBuild m',
                                +                                                      TensorType key_dtype) => 
                                +                           DataType -- ^ __value_dtype__: Type of the table values.
                                +                           -> Tensor v'1 key_dtype -- ^ __empty_key__: The key used to represent empty key buckets internally. Must not
                                +                                                   -- be used in insert or lookup operations.
                                +                           -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__: Handle to a table.
                                +mutableDenseHashTableV2 = mutableDenseHashTableV2' id
                                +mutableDenseHashTableV2' :: forall v'1 key_dtype m' . (MonadBuild m',
                                +                                                       TensorType key_dtype) =>
                                +                            OpParams ->
                                +                            DataType -- ^ __value_dtype__: Type of the table values.
                                +                            -> Tensor v'1 key_dtype -- ^ __empty_key__: The key used to represent empty key buckets internally. Must not
                                +                                                    -- be used in insert or lookup operations.
                                +                            -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__: Handle to a table.
                                +mutableDenseHashTableV2' op'options value_dtype empty_key | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs empty_key]
                                +        buildOp [] (opDef "MutableDenseHashTableV2"
                                +                    & opAttr "key_dtype" .~ tensorType (undefined :: key_dtype)
                                +                    & opAttr "value_dtype" .~ value_dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "empty_key"
                                +  description: "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations."
                                +  type_attr: "key_dtype"
                                +}
                                +output_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is shared under the given name across\nmultiple sessions."
                                +}
                                +attr {
                                +  name: "use_node_name_sharing"
                                +  type: "bool"
                                +  default_value { b: false }
                                +}
                                +attr {
                                +  name: "key_dtype"
                                +  type: "type"
                                +  description: "Type of the table keys."
                                +}
                                +attr {
                                +  name: "value_dtype"
                                +  type: "type"
                                +  description: "Type of the table values."
                                +}
                                +attr {
                                +  name: "value_shape"
                                +  type: "shape"
                                +  default_value { shape { } }
                                +  description: "The shape of each value."
                                +}
                                +attr {
                                +  name: "initial_num_buckets"
                                +  type: "int"
                                +  default_value { i: 131072 }
                                +  description: "The initial number of hash table buckets. Must be a power\nto 2."
                                +}
                                +attr {
                                +  name: "max_load_factor"
                                +  type: "float"
                                +  default_value { f: 0.8 }
                                +  description: "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1."
                                +}
                                +-}
                                +
                                +-- | Creates an empty hash table.
                                +--
                                +-- This op creates a mutable hash table, specifying the type of its keys and
                                +-- values. Each value must be a scalar. Data can be inserted into the table using
                                +-- the insert operations. It does not support the initialization operation.
                                +mutableHashTable :: forall m' . (MonadBuild m') => 
                                +                    DataType -- ^ __key_dtype__: Type of the table keys.
                                +                    -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                    -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table.
                                +mutableHashTable = mutableHashTable' id
                                +mutableHashTable' :: forall m' . (MonadBuild m') => OpParams ->
                                +                     DataType -- ^ __key_dtype__: Type of the table keys.
                                +                     -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                     -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table.
                                +mutableHashTable' op'options key_dtype value_dtype | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "MutableHashTable"
                                +                    & opAttr "key_dtype" .~ key_dtype
                                +                    & opAttr "value_dtype" .~ value_dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is shared under the given name across\nmultiple sessions."
                                +}
                                +attr {
                                +  name: "use_node_name_sharing"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true and shared_name is empty, the table is shared\nusing the node name."
                                +}
                                +attr {
                                +  name: "key_dtype"
                                +  type: "type"
                                +  description: "Type of the table keys."
                                +}
                                +attr {
                                +  name: "value_dtype"
                                +  type: "type"
                                +  description: "Type of the table values."
                                +}
                                +-}
                                +
                                +-- | Creates an empty hash table.
                                +--
                                +-- This op creates a mutable hash table, specifying the type of its keys and
                                +-- values. Each value must be a vector. Data can be inserted into the table using
                                +-- the insert operations. It does not support the initialization operation.
                                +mutableHashTableOfTensors :: forall m' . (MonadBuild m') => 
                                +                             DataType -- ^ __key_dtype__: Type of the table keys.
                                +                             -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                             -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table.
                                +mutableHashTableOfTensors = mutableHashTableOfTensors' id
                                +mutableHashTableOfTensors' :: forall m' . (MonadBuild m') => OpParams ->
                                +                              DataType -- ^ __key_dtype__: Type of the table keys.
                                +                              -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                              -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table.
                                +mutableHashTableOfTensors' op'options key_dtype value_dtype | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "MutableHashTableOfTensors"
                                +                    & opAttr "key_dtype" .~ key_dtype
                                +                    & opAttr "value_dtype" .~ value_dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is shared under the given name across\nmultiple sessions."
                                +}
                                +attr {
                                +  name: "use_node_name_sharing"
                                +  type: "bool"
                                +  default_value { b: false }
                                +}
                                +attr {
                                +  name: "key_dtype"
                                +  type: "type"
                                +  description: "Type of the table keys."
                                +}
                                +attr {
                                +  name: "value_dtype"
                                +  type: "type"
                                +  description: "Type of the table values."
                                +}
                                +attr {
                                +  name: "value_shape" type: "shape" default_value { shape { } }
                                +}
                                +-}
                                +
                                +-- | Creates an empty hash table.
                                +--
                                +-- This op creates a mutable hash table, specifying the type of its keys and
                                +-- values. Each value must be a vector. Data can be inserted into the table using
                                +-- the insert operations. It does not support the initialization operation.
                                +mutableHashTableOfTensorsV2 :: forall m' . (MonadBuild m') => 
                                +                               DataType -- ^ __key_dtype__: Type of the table keys.
                                +                               -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                               -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__: Handle to a table.
                                +mutableHashTableOfTensorsV2 = mutableHashTableOfTensorsV2' id
                                +mutableHashTableOfTensorsV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                                DataType -- ^ __key_dtype__: Type of the table keys.
                                +                                -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                                -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__: Handle to a table.
                                +mutableHashTableOfTensorsV2' op'options key_dtype
                                +                             value_dtype | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "MutableHashTableOfTensorsV2"
                                +                    & opAttr "key_dtype" .~ key_dtype
                                +                    & opAttr "value_dtype" .~ value_dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is shared under the given name across\nmultiple sessions."
                                +}
                                +attr {
                                +  name: "use_node_name_sharing"
                                +  type: "bool"
                                +  default_value { b: false }
                                +}
                                +attr {
                                +  name: "key_dtype"
                                +  type: "type"
                                +  description: "Type of the table keys."
                                +}
                                +attr {
                                +  name: "value_dtype"
                                +  type: "type"
                                +  description: "Type of the table values."
                                +}
                                +attr {
                                +  name: "value_shape" type: "shape" default_value { shape { } }
                                +}
                                +-}
                                +
                                +-- | Creates an empty hash table.
                                +--
                                +-- This op creates a mutable hash table, specifying the type of its keys and
                                +-- values. Each value must be a scalar. Data can be inserted into the table using
                                +-- the insert operations. It does not support the initialization operation.
                                +mutableHashTableV2 :: forall m' . (MonadBuild m') => 
                                +                      DataType -- ^ __key_dtype__: Type of the table keys.
                                +                      -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                      -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__: Handle to a table.
                                +mutableHashTableV2 = mutableHashTableV2' id
                                +mutableHashTableV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                       DataType -- ^ __key_dtype__: Type of the table keys.
                                +                       -> DataType -- ^ __value_dtype__: Type of the table values.
                                +                       -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__: Handle to a table.
                                +mutableHashTableV2' op'options key_dtype value_dtype | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "MutableHashTableV2"
                                +                    & opAttr "key_dtype" .~ key_dtype
                                +                    & opAttr "value_dtype" .~ value_dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "table_handle"
                                +  description: "Handle to a table."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this table is shared under the given name across\nmultiple sessions."
                                +}
                                +attr {
                                +  name: "use_node_name_sharing"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true and shared_name is empty, the table is shared\nusing the node name."
                                +}
                                +attr {
                                +  name: "key_dtype"
                                +  type: "type"
                                +  description: "Type of the table keys."
                                +}
                                +attr {
                                +  name: "value_dtype"
                                +  type: "type"
                                +  description: "Type of the table values."
                                +}
                                +-}
                                +
                                +-- | Computes numerical negative value element-wise.
                                +--
                                +-- I.e., \\(y = -x\\).
                                +neg :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Int.Int32,
                                +                               Data.Int.Int64, Data.Word.Word16, Double,
                                +                               Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +neg = neg' id
                                +neg' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                Data.Int.Int64, Data.Word.Word16, Double,
                                +                                Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +neg' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Neg"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Training via negative sampling.
                                +
                                +negTrain :: forall v'3 v'4 v'5 m' . (MonadBuild m') => 
                                +            Data.Int.Int64 -- ^ __num_negative_samples__: Number of negative samples per example.
                                +            -> Tensor Ref Float -- ^ __w_in__: input word embedding.
                                +            -> Tensor Ref Float -- ^ __w_out__: output word embedding.
                                +            -> Tensor v'3 Data.Int.Int32 -- ^ __examples__: A vector of word ids.
                                +            -> Tensor v'4 Data.Int.Int32 -- ^ __labels__: A vector of word ids.
                                +            -> Tensor v'5 Float -- ^ __lr__
                                +            -> m' (ControlNode)
                                +negTrain = negTrain' id
                                +negTrain' :: forall v'3 v'4 v'5 m' . (MonadBuild m') => OpParams ->
                                +             Data.Int.Int64 -- ^ __num_negative_samples__: Number of negative samples per example.
                                +             -> Tensor Ref Float -- ^ __w_in__: input word embedding.
                                +             -> Tensor Ref Float -- ^ __w_out__: output word embedding.
                                +             -> Tensor v'3 Data.Int.Int32 -- ^ __examples__: A vector of word ids.
                                +             -> Tensor v'4 Data.Int.Int32 -- ^ __labels__: A vector of word ids.
                                +             -> Tensor v'5 Float -- ^ __lr__
                                +             -> m' (ControlNode)
                                +negTrain' op'options num_negative_samples w_in w_out examples labels
                                +          lr | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs w_in,
                                +                                                             buildInputs w_out,
                                +                                                             buildInputs examples,
                                +                                                             buildInputs labels,
                                +                                                             buildInputs lr]
                                +        buildOp [] (opDef "NegTrain"
                                +                    & opAttr "num_negative_samples" .~ num_negative_samples
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "w_in"
                                +  description: "input word embedding."
                                +  type: DT_FLOAT
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "w_out"
                                +  description: "output word embedding."
                                +  type: DT_FLOAT
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "examples"
                                +  description: "A vector of word ids."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "labels" description: "A vector of word ids." type: DT_INT32
                                +}
                                +input_arg { name: "lr" type: DT_FLOAT }
                                +attr {
                                +  name: "vocab_count"
                                +  type: "list(int)"
                                +  description: "Count of words in the vocabulary."
                                +}
                                +attr {
                                +  name: "num_negative_samples"
                                +  type: "int"
                                +  description: "Number of negative samples per example."
                                +}
                                +-}
                                +
                                +-- | Makes its input available to the next iteration.
                                +
                                +nextIteration :: forall v'1 t . (TensorType t) => 
                                +                 Tensor v'1 t -- ^ __data__: The tensor to be made available to the next iteration.
                                +                 -> Tensor Build t -- ^ __output__: The same tensor as `data`.
                                +nextIteration = nextIteration' id
                                +nextIteration' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __data__: The tensor to be made available to the next iteration.
                                +                  -> Tensor Build t -- ^ __output__: The same tensor as `data`.
                                +nextIteration' op'options data' | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data']
                                +        return (opDef "NextIteration"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "data"
                                +  description: "The tensor to be made available to the next iteration."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same tensor as `data`."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Does nothing. Only useful as a placeholder for control edges.
                                +
                                +noOp :: forall m' . (MonadBuild m') => 
                                +        m' (ControlNode)
                                +noOp = noOp' id
                                +noOp' :: forall m' . (MonadBuild m') => OpParams ->
                                +         m' (ControlNode)
                                +noOp' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "NoOp"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +
                                +-}
                                +
                                +-- | Greedily selects a subset of bounding boxes in descending order of score,
                                +--
                                +-- pruning away boxes that have high intersection-over-union (IOU) overlap
                                +-- with previously selected boxes.  Bounding boxes are supplied as
                                +-- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
                                +-- diagonal pair of box corners and the coordinates can be provided as normalized
                                +-- (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
                                +-- is agnostic to where the origin is in the coordinate system.  Note that this
                                +-- algorithm is invariant to orthogonal transformations and translations
                                +-- of the coordinate system; thus translating or reflections of the coordinate
                                +-- system result in the same boxes being selected by the algorithm.
                                +-- The output of this operation is a set of integers indexing into the input
                                +-- collection of bounding boxes representing the selected boxes.  The bounding
                                +-- box coordinates corresponding to the selected indices can then be obtained
                                +-- using the `tf.gather operation`.  For example:
                                +--   selected_indices = tf.image.non_max_suppression(
                                +--       boxes, scores, max_output_size, iou_threshold)
                                +--   selected_boxes = tf.gather(boxes, selected_indices)
                                +nonMaxSuppression :: 
                                +                     Tensor v'1 Float -- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`.
                                +                     -> Tensor v'2 Float -- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single
                                +                                         -- score corresponding to each box (each row of boxes).
                                +                     -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__: A scalar integer tensor representing the maximum number of
                                +                                                  -- boxes to be selected by non max suppression.
                                +                     -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected
                                +                     -- indices from the boxes tensor, where `M <= max_output_size`.
                                +nonMaxSuppression = nonMaxSuppression' id
                                +nonMaxSuppression' :: OpParams ->
                                +                      Tensor v'1 Float -- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`.
                                +                      -> Tensor v'2 Float -- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single
                                +                                          -- score corresponding to each box (each row of boxes).
                                +                      -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__: A scalar integer tensor representing the maximum number of
                                +                                                   -- boxes to be selected by non max suppression.
                                +                      -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected
                                +                      -- indices from the boxes tensor, where `M <= max_output_size`.
                                +nonMaxSuppression' op'options boxes scores max_output_size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs boxes,
                                +                                                             buildInputs scores,
                                +                                                             buildInputs max_output_size]
                                +        return (opDef "NonMaxSuppression"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "boxes"
                                +  description: "A 2-D float tensor of shape `[num_boxes, 4]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "scores"
                                +  description: "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes)."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_output_size"
                                +  description: "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "selected_indices"
                                +  description: "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`."
                                +  type: DT_INT32
                                +}
                                +attr {
                                +  name: "iou_threshold"
                                +  type: "float"
                                +  default_value { f: 0.5 }
                                +  description: "A float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU."
                                +}
                                +-}
                                +
                                +-- | Greedily selects a subset of bounding boxes in descending order of score,
                                +--
                                +-- pruning away boxes that have high intersection-over-union (IOU) overlap
                                +-- with previously selected boxes.  Bounding boxes are supplied as
                                +-- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
                                +-- diagonal pair of box corners and the coordinates can be provided as normalized
                                +-- (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
                                +-- is agnostic to where the origin is in the coordinate system.  Note that this
                                +-- algorithm is invariant to orthogonal transformations and translations
                                +-- of the coordinate system; thus translating or reflections of the coordinate
                                +-- system result in the same boxes being selected by the algorithm.
                                +-- 
                                +-- The output of this operation is a set of integers indexing into the input
                                +-- collection of bounding boxes representing the selected boxes.  The bounding
                                +-- box coordinates corresponding to the selected indices can then be obtained
                                +-- using the `tf.gather operation`.  For example:
                                +-- 
                                +--   selected_indices = tf.image.non_max_suppression_v2(
                                +--       boxes, scores, max_output_size, iou_threshold)
                                +--   selected_boxes = tf.gather(boxes, selected_indices)
                                +nonMaxSuppressionV2 :: 
                                +                       Tensor v'1 Float -- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`.
                                +                       -> Tensor v'2 Float -- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single
                                +                                           -- score corresponding to each box (each row of boxes).
                                +                       -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__: A scalar integer tensor representing the maximum number of
                                +                                                    -- boxes to be selected by non max suppression.
                                +                       -> Tensor v'4 Float -- ^ __iou_threshold__: A 0-D float tensor representing the threshold for deciding whether
                                +                                           -- boxes overlap too much with respect to IOU.
                                +                       -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected
                                +                       -- indices from the boxes tensor, where `M <= max_output_size`.
                                +nonMaxSuppressionV2 = nonMaxSuppressionV2' id
                                +nonMaxSuppressionV2' :: OpParams ->
                                +                        Tensor v'1 Float -- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`.
                                +                        -> Tensor v'2 Float -- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single
                                +                                            -- score corresponding to each box (each row of boxes).
                                +                        -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__: A scalar integer tensor representing the maximum number of
                                +                                                     -- boxes to be selected by non max suppression.
                                +                        -> Tensor v'4 Float -- ^ __iou_threshold__: A 0-D float tensor representing the threshold for deciding whether
                                +                                            -- boxes overlap too much with respect to IOU.
                                +                        -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected
                                +                        -- indices from the boxes tensor, where `M <= max_output_size`.
                                +nonMaxSuppressionV2' op'options boxes scores max_output_size
                                +                     iou_threshold | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs boxes,
                                +                                                             buildInputs scores,
                                +                                                             buildInputs max_output_size,
                                +                                                             buildInputs iou_threshold]
                                +        return (opDef "NonMaxSuppressionV2"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "boxes"
                                +  description: "A 2-D float tensor of shape `[num_boxes, 4]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "scores"
                                +  description: "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes)."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_output_size"
                                +  description: "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "iou_threshold"
                                +  description: "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "selected_indices"
                                +  description: "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`."
                                +  type: DT_INT32
                                +}
                                +-}
                                +
                                +-- | Returns the truth value of (x != y) element-wise.
                                +--
                                +-- *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +notEqual :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float), Bool,
                                +                                        Data.ByteString.ByteString,
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build Bool -- ^ __z__
                                +notEqual = notEqual' id
                                +notEqual' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float), Bool,
                                +                                         Data.ByteString.ByteString,
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 t -- ^ __y__
                                +             -> Tensor Build Bool -- ^ __z__
                                +notEqual' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "NotEqual"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type: DT_BOOL }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_QUINT8
                                +      type: DT_QINT8
                                +      type: DT_QINT32
                                +      type: DT_STRING
                                +      type: DT_BOOL
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns a one-hot tensor.
                                +--
                                +-- The locations represented by indices in `indices` take value `on_value`,
                                +-- while all other locations take value `off_value`.
                                +-- 
                                +-- If the input `indices` is rank `N`, the output will have rank `N+1`,
                                +-- The new axis is created at dimension `axis` (default: the new axis is
                                +-- appended at the end).
                                +-- 
                                +-- If `indices` is a scalar the output shape will be a vector of length `depth`.
                                +-- 
                                +-- If `indices` is a vector of length `features`, the output shape will be:
                                +-- ```
                                +--   features x depth if axis == -1
                                +--   depth x features if axis == 0
                                +-- ```
                                +-- 
                                +-- If `indices` is a matrix (batch) with shape `[batch, features]`,
                                +-- the output shape will be:
                                +-- ```
                                +--   batch x features x depth if axis == -1
                                +--   batch x depth x features if axis == 1
                                +--   depth x batch x features if axis == 0
                                +-- ```
                                +-- 
                                +-- 
                                +-- Examples
                                +-- =========
                                +-- 
                                +-- Suppose that
                                +-- 
                                +-- ```
                                +--   indices = [0, 2, -1, 1]
                                +--   depth = 3
                                +--   on_value = 5.0
                                +--   off_value = 0.0
                                +--   axis = -1
                                +-- ```
                                +-- 
                                +-- Then output is `[4 x 3]`:
                                +-- 
                                +--     ```output =
                                +--       [5.0 0.0 0.0]  // one_hot(0)
                                +--       [0.0 0.0 5.0]  // one_hot(2)
                                +--       [0.0 0.0 0.0]  // one_hot(-1)
                                +--       [0.0 5.0 0.0]  // one_hot(1)
                                +--     ```
                                +-- 
                                +-- Suppose that
                                +-- 
                                +-- ```
                                +--   indices = [0, 2, -1, 1]
                                +--   depth = 3
                                +--   on_value = 0.0
                                +--   off_value = 3.0
                                +--   axis = 0
                                +-- ```
                                +-- 
                                +-- Then output is `[3 x 4]`:
                                +-- 
                                +--     ```output =
                                +--       [0.0 3.0 3.0 3.0]
                                +--       [3.0 3.0 3.0 0.0]
                                +--       [3.0 3.0 3.0 3.0]
                                +--       [3.0 0.0 3.0 3.0]
                                +--     //  ^                one_hot(0)
                                +--     //      ^            one_hot(2)
                                +--     //          ^        one_hot(-1)
                                +--     //              ^    one_hot(1)
                                +--     ```
                                +-- Suppose that
                                +-- 
                                +-- ```
                                +--   indices = [[0, 2], [1, -1]]
                                +--   depth = 3
                                +--   on_value = 1.0
                                +--   off_value = 0.0
                                +--   axis = -1
                                +-- ```
                                +-- 
                                +-- Then output is `[2 x 2 x 3]`:
                                +-- 
                                +--     ```output =
                                +--       [
                                +--         [1.0, 0.0, 0.0]  // one_hot(0)
                                +--         [0.0, 0.0, 1.0]  // one_hot(2)
                                +--       ][
                                +--         [0.0, 1.0, 0.0]  // one_hot(1)
                                +--         [0.0, 0.0, 0.0]  // one_hot(-1)
                                +--       ]```
                                +oneHot :: forall v'1 v'2 v'3 v'4 t tI . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64,
                                +                                                               Data.Word.Word8] tI) =>
                                +          
                                +          Tensor v'1 tI -- ^ __indices__: A tensor of indices.
                                +          -> Tensor v'2 Data.Int.Int32 -- ^ __depth__: A scalar defining the depth of the one hot dimension.
                                +          -> Tensor v'3 t -- ^ __on_value__: A scalar defining the value to fill in output when `indices[j] = i`.
                                +          -> Tensor v'4 t -- ^ __off_value__: A scalar defining the value to fill in output when `indices[j] != i`.
                                +          -> Tensor Build t -- ^ __output__: The one-hot tensor.
                                +oneHot = oneHot' id
                                +oneHot' :: forall v'1 v'2 v'3 v'4 t tI . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                                Data.Int.Int64,
                                +                                                                Data.Word.Word8] tI) =>
                                +           OpParams ->
                                +           Tensor v'1 tI -- ^ __indices__: A tensor of indices.
                                +           -> Tensor v'2 Data.Int.Int32 -- ^ __depth__: A scalar defining the depth of the one hot dimension.
                                +           -> Tensor v'3 t -- ^ __on_value__: A scalar defining the value to fill in output when `indices[j] = i`.
                                +           -> Tensor v'4 t -- ^ __off_value__: A scalar defining the value to fill in output when `indices[j] != i`.
                                +           -> Tensor Build t -- ^ __output__: The one-hot tensor.
                                +oneHot' op'options indices depth on_value off_value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices,
                                +                                                             buildInputs depth,
                                +                                                             buildInputs on_value,
                                +                                                             buildInputs off_value]
                                +        return (opDef "OneHot"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "TI" .~ tensorType (undefined :: tI)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "indices" description: "A tensor of indices." type_attr: "TI"
                                +}
                                +input_arg {
                                +  name: "depth"
                                +  description: "A scalar defining the depth of the one hot dimension."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "on_value"
                                +  description: "A scalar defining the value to fill in output when `indices[j] = i`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "off_value"
                                +  description: "A scalar defining the value to fill in output when `indices[j] != i`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output" description: "The one-hot tensor." type_attr: "T"
                                +}
                                +attr {
                                +  name: "axis"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The axis to fill (default: -1, a new inner-most axis)."
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "TI"
                                +  type: "type"
                                +  default_value { type: DT_INT64 }
                                +  allowed_values {
                                +    list { type: DT_UINT8 type: DT_INT32 type: DT_INT64 }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns a tensor of ones with the same shape and type as x.
                                +
                                +onesLike :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int32, Data.Int.Int64, Double,
                                +                                    Float] t) => 
                                +            Tensor v'1 t -- ^ __x__: a tensor of type T.
                                +            -> Tensor Build t -- ^ __y__: a tensor of the same shape and type as x but filled with ones.
                                +onesLike = onesLike' id
                                +onesLike' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int32, Data.Int.Int64, Double,
                                +                                     Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __x__: a tensor of type T.
                                +             -> Tensor Build t -- ^ __y__: a tensor of the same shape and type as x but filled with ones.
                                +onesLike' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "OnesLike"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "x" description: "a tensor of type T." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "y"
                                +  description: "a tensor of the same shape and type as x but filled with ones."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Op removes all elements in the underlying container.
                                +
                                +orderedMapClear :: forall m' . (MonadBuild m') => 
                                +                   [DataType] -- ^ __dtypes__
                                +                   -> m' (ControlNode)
                                +orderedMapClear = orderedMapClear' id
                                +orderedMapClear' :: forall m' . (MonadBuild m') => OpParams ->
                                +                    [DataType] -- ^ __dtypes__
                                +                    -> m' (ControlNode)
                                +orderedMapClear' op'options dtypes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "OrderedMapClear"
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op returns the number of incomplete elements in the underlying container.
                                +
                                +orderedMapIncompleteSize :: forall m' . (MonadBuild m') => 
                                +                            [DataType] -- ^ __dtypes__
                                +                            -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +orderedMapIncompleteSize = orderedMapIncompleteSize' id
                                +orderedMapIncompleteSize' :: forall m' . (MonadBuild m') => OpParams ->
                                +                             [DataType] -- ^ __dtypes__
                                +                             -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +orderedMapIncompleteSize' op'options dtypes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "OrderedMapIncompleteSize"
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "size" type: DT_INT32 }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op peeks at the values at the specified key.  If the
                                +--
                                +-- underlying container does not contain this key
                                +-- this op will block until it does.   This Op is optimized for
                                +-- performance.
                                +orderedMapPeek :: forall v'1 v'2 dtypes m' . (MonadBuild m',
                                +                                              TensorTypes dtypes) => 
                                +                  Tensor v'1 Data.Int.Int64 -- ^ __key__
                                +                  -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                  -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +orderedMapPeek = orderedMapPeek' id
                                +orderedMapPeek' :: forall v'1 v'2 dtypes m' . (MonadBuild m',
                                +                                               TensorTypes dtypes) =>
                                +                   OpParams ->
                                +                   Tensor v'1 Data.Int.Int64 -- ^ __key__
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                   -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +orderedMapPeek' op'options key indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "OrderedMapPeek"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "key" type: DT_INT64 }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +output_arg { name: "values" type_list_attr: "dtypes" }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op returns the number of elements in the underlying container.
                                +
                                +orderedMapSize :: forall m' . (MonadBuild m') => 
                                +                  [DataType] -- ^ __dtypes__
                                +                  -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +orderedMapSize = orderedMapSize' id
                                +orderedMapSize' :: forall m' . (MonadBuild m') => OpParams ->
                                +                   [DataType] -- ^ __dtypes__
                                +                   -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +orderedMapSize' op'options dtypes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "OrderedMapSize"
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "size" type: DT_INT32 }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Stage (key, values) in the underlying container which behaves like a ordered
                                +--
                                +-- associative container.   Elements are ordered by key.
                                +orderedMapStage :: forall v'1 v'2 v'3 fake_dtypes m' . (MonadBuild m',
                                +                                                        TensorTypes fake_dtypes) =>
                                +                   
                                +                   [DataType] -- ^ __dtypes__
                                +                   -> Tensor v'1 Data.Int.Int64 -- ^ __key__: int64
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                   -> TensorList (v'3) fake_dtypes -- ^ __values__: a list of tensors
                                +                                                   -- dtypes A list of data types that inserted values should adhere to.
                                +                   -> m' (ControlNode)
                                +orderedMapStage = orderedMapStage' id
                                +orderedMapStage' :: forall v'1 v'2 v'3 fake_dtypes m' . (MonadBuild m',
                                +                                                         TensorTypes fake_dtypes) =>
                                +                    OpParams ->
                                +                    [DataType] -- ^ __dtypes__
                                +                    -> Tensor v'1 Data.Int.Int64 -- ^ __key__: int64
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                    -> TensorList (v'3) fake_dtypes -- ^ __values__: a list of tensors
                                +                                                    -- dtypes A list of data types that inserted values should adhere to.
                                +                    -> m' (ControlNode)
                                +orderedMapStage' op'options dtypes key indices values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs values]
                                +        buildOp [] (opDef "OrderedMapStage"
                                +                    & opAttr "fake_dtypes" .~ fromTensorTypes (Proxy :: Proxy fake_dtypes)
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "key" description: "int64" type: DT_INT64 }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +input_arg {
                                +  name: "values"
                                +  description: "a list of tensors\ndtypes A list of data types that inserted values should adhere to."
                                +  type_list_attr: "fake_dtypes"
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr {
                                +  name: "fake_dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "It is necessary to match this name to the matching Unstage Op."
                                +}
                                +-}
                                +
                                +-- | Op removes and returns the values associated with the key
                                +--
                                +-- from the underlying container.   If the underlying container
                                +-- does not contain this key, the op will block until it does.
                                +orderedMapUnstage :: forall v'1 v'2 dtypes m' . (MonadBuild m',
                                +                                                 TensorTypes dtypes) => 
                                +                     Tensor v'1 Data.Int.Int64 -- ^ __key__
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                     -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +orderedMapUnstage = orderedMapUnstage' id
                                +orderedMapUnstage' :: forall v'1 v'2 dtypes m' . (MonadBuild m',
                                +                                                  TensorTypes dtypes) =>
                                +                      OpParams ->
                                +                      Tensor v'1 Data.Int.Int64 -- ^ __key__
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                      -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +orderedMapUnstage' op'options key indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "OrderedMapUnstage"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "key" type: DT_INT64 }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +output_arg { name: "values" type_list_attr: "dtypes" }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op removes and returns the (key, value) element with the smallest
                                +--
                                +-- key from the underlying container.   If the underlying container
                                +-- does not contain elements, the op will block until it does.
                                +orderedMapUnstageNoKey :: forall v'1 dtypes m' . (MonadBuild m',
                                +                                                  TensorTypes dtypes) => 
                                +                          Tensor v'1 Data.Int.Int32 -- ^ __indices__
                                +                          -> m' ((Tensor Value Data.Int.Int64,
                                +                                  TensorList (Value) dtypes))
                                +                          -- ^ (__key__, __values__)
                                +                          --
                                +                          -- * __key__
                                +                          --
                                +                          -- * __values__
                                +orderedMapUnstageNoKey = orderedMapUnstageNoKey' id
                                +orderedMapUnstageNoKey' :: forall v'1 dtypes m' . (MonadBuild m',
                                +                                                   TensorTypes dtypes) =>
                                +                           OpParams ->
                                +                           Tensor v'1 Data.Int.Int32 -- ^ __indices__
                                +                           -> m' ((Tensor Value Data.Int.Int64,
                                +                                   TensorList (Value) dtypes))
                                +                           -- ^ (__key__, __values__)
                                +                           --
                                +                           -- * __key__
                                +                           --
                                +                           -- * __values__
                                +orderedMapUnstageNoKey' op'options indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices]
                                +        buildOp [] (opDef "OrderedMapUnstageNoKey"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "indices" type: DT_INT32 }
                                +output_arg { name: "key" type: DT_INT64 }
                                +output_arg { name: "values" type_list_attr: "dtypes" }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
                                +--
                                +-- Packs the `N` tensors in `values` into a tensor with rank one higher than each
                                +-- tensor in `values`, by packing them along the `axis` dimension.
                                +-- Given a list of tensors of shape `(A, B, C)`;
                                +-- 
                                +-- if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
                                +-- if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
                                +-- Etc.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'x' is [1, 4]
                                +-- # 'y' is [2, 5]
                                +-- # 'z' is [3, 6]
                                +-- pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
                                +-- pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
                                +-- ```
                                +-- 
                                +-- This is the opposite of `unpack`.
                                +pack :: forall v'1 t . (TensorType t) => 
                                +        [Tensor v'1 t] -- ^ __values__: Must be of same shape and type.
                                +        -> Tensor Build t -- ^ __output__: The packed tensor.
                                +pack = pack' id
                                +pack' :: forall v'1 t . (TensorType t) => OpParams ->
                                +         [Tensor v'1 t] -- ^ __values__: Must be of same shape and type.
                                +         -> Tensor Build t -- ^ __output__: The packed tensor.
                                +pack' op'options values | eqLengthGuard [("N", [("values", length values)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values]
                                +        return (opDef "Pack"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length values) :: Int64
                                +{-
                                +input_arg {
                                +  name: "values"
                                +  description: "Must be of same shape and type."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +}
                                +output_arg {
                                +  name: "output" description: "The packed tensor." type_attr: "T"
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "axis"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Dimension along which to pack.  Negative values wrap around, so the\nvalid range is `[-(R+1), R+1)`."
                                +}
                                +-}
                                +
                                +-- | Pads a tensor with zeros.
                                +--
                                +-- This operation pads a `input` with zeros according to the `paddings` you
                                +-- specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
                                +-- rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
                                +-- how many zeros to add before the contents of `input` in that dimension, and
                                +-- `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
                                +-- in that dimension.
                                +-- 
                                +-- The padded size of each dimension D of the output is:
                                +-- 
                                +-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 't' is [[1, 1], [2, 2]]
                                +-- # 'paddings' is [[1, 1], [2, 2]]
                                +-- # rank of 't' is 2
                                +-- pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                                +--                       [0, 0, 1, 1, 0, 0]
                                +--                       [0, 0, 2, 2, 0, 0]
                                +--                       [0, 0, 0, 0, 0, 0]]
                                +-- ```
                                +pad :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                           Data.Int.Int64] tpaddings) =>
                                +       
                                +       Tensor v'1 t -- ^ __input__
                                +       -> Tensor v'2 tpaddings -- ^ __paddings__
                                +       -> Tensor Build t -- ^ __output__
                                +pad = pad' id
                                +pad' :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] tpaddings) =>
                                +        OpParams ->
                                +        Tensor v'1 t -- ^ __input__
                                +        -> Tensor v'2 tpaddings -- ^ __paddings__
                                +        -> Tensor Build t -- ^ __output__
                                +pad' op'options input paddings | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs paddings]
                                +        return (opDef "Pad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg { name: "paddings" type_attr: "Tpaddings" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tpaddings"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Pads a tensor.
                                +--
                                +-- This operation pads `input` according to the `paddings` and `constant_values`
                                +-- you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
                                +-- the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
                                +-- how many padding values to add before the contents of `input` in that dimension,
                                +-- and `paddings[D, 1]` indicates how many padding values to add after the contents
                                +-- of `input` in that dimension. `constant_values` is a scalar tensor of the same
                                +-- type as `input` that indicates the value to use for padding `input`.
                                +-- 
                                +-- The padded size of each dimension D of the output is:
                                +-- 
                                +-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 't' is [[1, 1], [2, 2]]
                                +-- # 'paddings' is [[1, 1], [2, 2]]
                                +-- # 'constant_values' is 0
                                +-- # rank of 't' is 2
                                +-- pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                                +--                       [0, 0, 1, 1, 0, 0]
                                +--                       [0, 0, 2, 2, 0, 0]
                                +--                       [0, 0, 0, 0, 0, 0]]
                                +-- ```
                                +padV2 :: forall v'1 v'2 v'3 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                                 Data.Int.Int64] tpaddings) =>
                                +         
                                +         Tensor v'1 t -- ^ __input__
                                +         -> Tensor v'2 tpaddings -- ^ __paddings__
                                +         -> Tensor v'3 t -- ^ __constant_values__
                                +         -> Tensor Build t -- ^ __output__
                                +padV2 = padV2' id
                                +padV2' :: forall v'1 v'2 v'3 t tpaddings . (TensorType t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tpaddings) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __input__
                                +          -> Tensor v'2 tpaddings -- ^ __paddings__
                                +          -> Tensor v'3 t -- ^ __constant_values__
                                +          -> Tensor Build t -- ^ __output__
                                +padV2' op'options input paddings constant_values | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs paddings,
                                +                                                             buildInputs constant_values]
                                +        return (opDef "PadV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg { name: "paddings" type_attr: "Tpaddings" }
                                +input_arg { name: "constant_values" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tpaddings"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that batches and pads `batch_size` elements from the input.
                                +
                                +paddedBatchDataset :: forall v'1 v'2 v'3 v'4 toutput_types m' . (MonadBuild m',
                                +                                                                 TensorTypes toutput_types) =>
                                +                      
                                +                      Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                      -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__: A scalar representing the number of elements to accumulate in a
                                +                                                   -- batch.
                                +                      -> [Tensor v'3 Data.Int.Int64] -- ^ __padded_shapes__: A list of int64 tensors representing the desired padded shapes
                                +                                                     -- of the corresponding output components. These shapes may be partially
                                +                                                     -- specified, using `-1` to indicate that a particular dimension should be
                                +                                                     -- padded to the maximum size of all batch elements.
                                +                      -> TensorList (v'4) toutput_types -- ^ __padding_values__: A list of scalars containing the padding value to use for
                                +                                                        -- each of the outputs.
                                +                      -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +paddedBatchDataset = paddedBatchDataset' id
                                +paddedBatchDataset' :: forall v'1 v'2 v'3 v'4 toutput_types m' . (MonadBuild m',
                                +                                                                  TensorTypes toutput_types) =>
                                +                       OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                       -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__: A scalar representing the number of elements to accumulate in a
                                +                                                    -- batch.
                                +                       -> [Tensor v'3 Data.Int.Int64] -- ^ __padded_shapes__: A list of int64 tensors representing the desired padded shapes
                                +                                                      -- of the corresponding output components. These shapes may be partially
                                +                                                      -- specified, using `-1` to indicate that a particular dimension should be
                                +                                                      -- padded to the maximum size of all batch elements.
                                +                       -> TensorList (v'4) toutput_types -- ^ __padding_values__: A list of scalars containing the padding value to use for
                                +                                                         -- each of the outputs.
                                +                       -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +paddedBatchDataset' op'options input_dataset batch_size padded_shapes
                                +                    padding_values | eqLengthGuard [("N", [("padded_shapes", length padded_shapes)])] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs batch_size,
                                +                                                             buildInputs padded_shapes,
                                +                                                             buildInputs padding_values]
                                +        buildOp [] (opDef "PaddedBatchDataset"
                                +                    & opAttr "Toutput_types" .~ fromTensorTypes (Proxy :: Proxy toutput_types)
                                +                    & opAttr "N" .~ n
                                +                    & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length padded_shapes) :: Int64
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +input_arg {
                                +  name: "batch_size"
                                +  description: "A scalar representing the number of elements to accumulate in a\nbatch."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "padded_shapes"
                                +  description: "A list of int64 tensors representing the desired padded shapes\nof the corresponding output components. These shapes may be partially\nspecified, using `-1` to indicate that a particular dimension should be\npadded to the maximum size of all batch elements."
                                +  type: DT_INT64
                                +  number_attr: "N"
                                +}
                                +input_arg {
                                +  name: "padding_values"
                                +  description: "A list of scalars containing the padding value to use for\neach of the outputs."
                                +  type_list_attr: "Toutput_types"
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "Toutput_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | A queue that produces elements in first-in first-out order.
                                +--
                                +-- Variable-size shapes are allowed by setting the corresponding shape dimensions
                                +-- to 0 in the shape attr.  In this case DequeueMany will pad up to the maximum
                                +-- size of any given element in the minibatch.  See below for details.
                                +paddingFIFOQueue :: forall m' . (MonadBuild m') => 
                                +                    [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                    -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
                                +paddingFIFOQueue = paddingFIFOQueue' id
                                +paddingFIFOQueue' :: forall m' . (MonadBuild m') => OpParams ->
                                +                     [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                     -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
                                +paddingFIFOQueue' op'options component_types | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "PaddingFIFOQueue"
                                +                    & opAttr "component_types" .~ component_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1.  In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | A queue that produces elements in first-in first-out order.
                                +--
                                +-- Variable-size shapes are allowed by setting the corresponding shape dimensions
                                +-- to 0 in the shape attr.  In this case DequeueMany will pad up to the maximum
                                +-- size of any given element in the minibatch.  See below for details.
                                +paddingFIFOQueueV2 :: forall m' . (MonadBuild m') => 
                                +                      [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                      -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue.
                                +paddingFIFOQueueV2 = paddingFIFOQueueV2' id
                                +paddingFIFOQueueV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                       [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                       -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue.
                                +paddingFIFOQueueV2' op'options component_types | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "PaddingFIFOQueueV2"
                                +                    & opAttr "component_types" .~ component_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the queue."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1.  In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | Concatenates a list of `N` tensors along the first dimension.
                                +--
                                +-- The input tensors are all required to have size 1 in the first dimension.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'x' is [[1, 4]]
                                +-- # 'y' is [[2, 5]]
                                +-- # 'z' is [[3, 6]]
                                +-- parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
                                +-- ```
                                +-- 
                                +-- The difference between concat and parallel_concat is that concat requires all
                                +-- of the inputs be computed before the operation will begin but doesn't require
                                +-- that the input shapes be known during graph construction.  Parallel concat
                                +-- will copy pieces of the input into the output as they become available, in
                                +-- some situations this can provide a performance benefit.
                                +parallelConcat :: forall v'1 t . (TensorType t) => 
                                +                  Shape -- ^ __shape__: the final shape of the result; should be equal to the shapes of any input
                                +                        -- but with the number of input values in the first dimension.
                                +                  -> [Tensor v'1 t] -- ^ __values__: Tensors to be concatenated. All must have size 1 in the first dimension
                                +                                    -- and same shape.
                                +                  -> Tensor Build t -- ^ __output__: The concatenated tensor.
                                +parallelConcat = parallelConcat' id
                                +parallelConcat' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                   Shape -- ^ __shape__: the final shape of the result; should be equal to the shapes of any input
                                +                         -- but with the number of input values in the first dimension.
                                +                   -> [Tensor v'1 t] -- ^ __values__: Tensors to be concatenated. All must have size 1 in the first dimension
                                +                                     -- and same shape.
                                +                   -> Tensor Build t -- ^ __output__: The concatenated tensor.
                                +parallelConcat' op'options shape
                                +                values | eqLengthGuard [("N", [("values", length values)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values]
                                +        return (opDef "ParallelConcat"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "shape" .~ shape
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length values) :: Int64
                                +{-
                                +input_arg {
                                +  name: "values"
                                +  description: "Tensors to be concatenated. All must have size 1 in the first dimension\nand same shape."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The concatenated tensor."
                                +  type_attr: "T"
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "the final shape of the result; should be equal to the shapes of any input\nbut with the number of input values in the first dimension."
                                +}
                                +-}
                                +
                                +-- | Outputs random values from a normal distribution. The parameters may each be a
                                +--
                                +-- scalar which applies to the entire output, or a vector of length shape[0] which
                                +-- stores the parameters for each batch.
                                +parameterizedTruncatedNormal :: forall v'1 v'2 v'3 v'4 v'5 dtype t
                                +                                m' . (MonadBuild m', OneOf '[Data.Word.Word16,
                                +                                                             Double,
                                +                                                             Float] dtype,
                                +                                      OneOf '[Data.Int.Int32,
                                +                                              Data.Int.Int64] t) => 
                                +                                Tensor v'1 t -- ^ __shape__: The shape of the output tensor. Batches are indexed by the 0th dimension.
                                +                                -> Tensor v'2 dtype -- ^ __means__: The mean parameter of each batch.
                                +                                -> Tensor v'3 dtype -- ^ __stdevs__: The standard deviation parameter of each batch. Must be greater than 0.
                                +                                -> Tensor v'4 dtype -- ^ __minvals__: The minimum cutoff. May be -infinity.
                                +                                -> Tensor v'5 dtype -- ^ __maxvals__: The maximum cutoff. May be +infinity, and must be more than the minval
                                +                                                    -- for each batch.
                                +                                -> m' (Tensor Value dtype) -- ^ __output__: A matrix of shape num_batches x samples_per_batch, filled with random
                                +                                -- truncated normal values using the parameters for each row.
                                +parameterizedTruncatedNormal = parameterizedTruncatedNormal' id
                                +parameterizedTruncatedNormal' :: forall v'1 v'2 v'3 v'4 v'5 dtype t
                                +                                 m' . (MonadBuild m', OneOf '[Data.Word.Word16,
                                +                                                              Double,
                                +                                                              Float] dtype,
                                +                                       OneOf '[Data.Int.Int32,
                                +                                               Data.Int.Int64] t) => OpParams ->
                                +                                 Tensor v'1 t -- ^ __shape__: The shape of the output tensor. Batches are indexed by the 0th dimension.
                                +                                 -> Tensor v'2 dtype -- ^ __means__: The mean parameter of each batch.
                                +                                 -> Tensor v'3 dtype -- ^ __stdevs__: The standard deviation parameter of each batch. Must be greater than 0.
                                +                                 -> Tensor v'4 dtype -- ^ __minvals__: The minimum cutoff. May be -infinity.
                                +                                 -> Tensor v'5 dtype -- ^ __maxvals__: The maximum cutoff. May be +infinity, and must be more than the minval
                                +                                                     -- for each batch.
                                +                                 -> m' (Tensor Value dtype) -- ^ __output__: A matrix of shape num_batches x samples_per_batch, filled with random
                                +                                 -- truncated normal values using the parameters for each row.
                                +parameterizedTruncatedNormal' op'options shape means stdevs minvals
                                +                              maxvals | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape,
                                +                                                             buildInputs means,
                                +                                                             buildInputs stdevs,
                                +                                                             buildInputs minvals,
                                +                                                             buildInputs maxvals]
                                +        buildOp [] (opDef "ParameterizedTruncatedNormal"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "The shape of the output tensor. Batches are indexed by the 0th dimension."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "means"
                                +  description: "The mean parameter of each batch."
                                +  type_attr: "dtype"
                                +}
                                +input_arg {
                                +  name: "stdevs"
                                +  description: "The standard deviation parameter of each batch. Must be greater than 0."
                                +  type_attr: "dtype"
                                +}
                                +input_arg {
                                +  name: "minvals"
                                +  description: "The minimum cutoff. May be -infinity."
                                +  type_attr: "dtype"
                                +}
                                +input_arg {
                                +  name: "maxvals"
                                +  description: "The maximum cutoff. May be +infinity, and must be more than the minval\nfor each batch."
                                +  type_attr: "dtype"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A matrix of shape num_batches x samples_per_batch, filled with random\ntruncated normal values using the parameters for each row."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the output."
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Transforms a vector of brain.Example protos (as strings) into typed tensors.
                                +
                                +parseExample :: forall v'1 v'2 v'3 v'4 v'5 sparse_types
                                +                tdense . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64,
                                +                                   Float] sparse_types,
                                +                          OneOfs '[Data.ByteString.ByteString, Data.Int.Int64,
                                +                                   Float] tdense) => 
                                +                Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A vector containing a batch of binary serialized Example protos.
                                +                -> Tensor v'2 Data.ByteString.ByteString -- ^ __names__: A vector containing the names of the serialized protos.
                                +                                                         -- May contain, for example, table key (descriptive) names for the
                                +                                                         -- corresponding serialized protos.  These are purely useful for debugging
                                +                                                         -- purposes, and the presence of values here has no effect on the output.
                                +                                                         -- May also be an empty vector if no names are available.
                                +                                                         -- If non-empty, this vector must be the same length as "serialized".
                                +                -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __sparse_keys__: A list of Nsparse string Tensors (scalars).
                                +                                                           -- The keys expected in the Examples' features associated with sparse values.
                                +                -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __dense_keys__: A list of Ndense string Tensors (scalars).
                                +                                                           -- The keys expected in the Examples' features associated with dense values.
                                +                -> TensorList (v'5) tdense -- ^ __dense_defaults__: A list of Ndense Tensors (some may be empty).
                                +                                           -- dense_defaults[j] provides default values
                                +                                           -- when the example's feature_map lacks dense_key[j].  If an empty Tensor is
                                +                                           -- provided for dense_defaults[j], then the Feature dense_keys[j] is required.
                                +                                           -- The input type is inferred from dense_defaults[j], even when it's empty.
                                +                                           -- If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
                                +                                           -- then the shape of dense_defaults[j] must match that of dense_shapes[j].
                                +                                           -- If dense_shapes[j] has an undefined major dimension (variable strides dense
                                +                                           -- feature), dense_defaults[j] must contain a single element:
                                +                                           -- the padding element.
                                +                -> ([Tensor Build Data.Int.Int64],
                                +                    TensorList (Build) sparse_types,
                                +                    [Tensor Build Data.Int.Int64], TensorList (Build) tdense)
                                +                -- ^ (__sparse_indices__, __sparse_values__, __sparse_shapes__, __dense_values__)
                                +                --
                                +                -- * __sparse_indices__
                                +                --
                                +                -- * __sparse_values__
                                +                --
                                +                -- * __sparse_shapes__
                                +                --
                                +                -- * __dense_values__
                                +parseExample = parseExample' id
                                +parseExample' :: forall v'1 v'2 v'3 v'4 v'5 sparse_types
                                +                 tdense . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64,
                                +                                    Float] sparse_types,
                                +                           OneOfs '[Data.ByteString.ByteString, Data.Int.Int64,
                                +                                    Float] tdense) => OpParams ->
                                +                 Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A vector containing a batch of binary serialized Example protos.
                                +                 -> Tensor v'2 Data.ByteString.ByteString -- ^ __names__: A vector containing the names of the serialized protos.
                                +                                                          -- May contain, for example, table key (descriptive) names for the
                                +                                                          -- corresponding serialized protos.  These are purely useful for debugging
                                +                                                          -- purposes, and the presence of values here has no effect on the output.
                                +                                                          -- May also be an empty vector if no names are available.
                                +                                                          -- If non-empty, this vector must be the same length as "serialized".
                                +                 -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __sparse_keys__: A list of Nsparse string Tensors (scalars).
                                +                                                            -- The keys expected in the Examples' features associated with sparse values.
                                +                 -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __dense_keys__: A list of Ndense string Tensors (scalars).
                                +                                                            -- The keys expected in the Examples' features associated with dense values.
                                +                 -> TensorList (v'5) tdense -- ^ __dense_defaults__: A list of Ndense Tensors (some may be empty).
                                +                                            -- dense_defaults[j] provides default values
                                +                                            -- when the example's feature_map lacks dense_key[j].  If an empty Tensor is
                                +                                            -- provided for dense_defaults[j], then the Feature dense_keys[j] is required.
                                +                                            -- The input type is inferred from dense_defaults[j], even when it's empty.
                                +                                            -- If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
                                +                                            -- then the shape of dense_defaults[j] must match that of dense_shapes[j].
                                +                                            -- If dense_shapes[j] has an undefined major dimension (variable strides dense
                                +                                            -- feature), dense_defaults[j] must contain a single element:
                                +                                            -- the padding element.
                                +                 -> ([Tensor Build Data.Int.Int64],
                                +                     TensorList (Build) sparse_types,
                                +                     [Tensor Build Data.Int.Int64], TensorList (Build) tdense)
                                +                 -- ^ (__sparse_indices__, __sparse_values__, __sparse_shapes__, __dense_values__)
                                +                 --
                                +                 -- * __sparse_indices__
                                +                 --
                                +                 -- * __sparse_values__
                                +                 --
                                +                 -- * __sparse_shapes__
                                +                 --
                                +                 -- * __dense_values__
                                +parseExample' op'options serialized names sparse_keys dense_keys
                                +              dense_defaults | eqLengthGuard [("Nsparse", [("sparse_keys", length sparse_keys)]),
                                +                                              ("Ndense", [("dense_keys", length dense_keys)])] =
                                +    pureOp [nsparse, nsparse] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized,
                                +                                                             buildInputs names,
                                +                                                             buildInputs sparse_keys,
                                +                                                             buildInputs dense_keys,
                                +                                                             buildInputs dense_defaults]
                                +        return (opDef "ParseExample"
                                +                & opAttr "sparse_types" .~ fromTensorTypes (Proxy :: Proxy sparse_types)
                                +                & opAttr "Tdense" .~ fromTensorTypes (Proxy :: Proxy tdense)
                                +                & opAttr "Nsparse" .~ nsparse
                                +                & opAttr "Ndense" .~ ndense
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    nsparse = fromIntegral (length sparse_keys) :: Int64
                                +    ndense = fromIntegral (length dense_keys) :: Int64
                                +{-
                                +input_arg {
                                +  name: "serialized"
                                +  description: "A vector containing a batch of binary serialized Example protos."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "names"
                                +  description: "A vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) names for the\ncorresponding serialized protos.  These are purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no names are available.\nIf non-empty, this vector must be the same length as \"serialized\"."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "sparse_keys"
                                +  description: "A list of Nsparse string Tensors (scalars).\nThe keys expected in the Examples\' features associated with sparse values."
                                +  type: DT_STRING
                                +  number_attr: "Nsparse"
                                +}
                                +input_arg {
                                +  name: "dense_keys"
                                +  description: "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples\' features associated with dense values."
                                +  type: DT_STRING
                                +  number_attr: "Ndense"
                                +}
                                +input_arg {
                                +  name: "dense_defaults"
                                +  description: "A list of Ndense Tensors (some may be empty).\ndense_defaults[j] provides default values\nwhen the example\'s feature_map lacks dense_key[j].  If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it\'s empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element."
                                +  type_list_attr: "Tdense"
                                +}
                                +output_arg {
                                +  name: "sparse_indices" type: DT_INT64 number_attr: "Nsparse"
                                +}
                                +output_arg { name: "sparse_values" type_list_attr: "sparse_types" }
                                +output_arg {
                                +  name: "sparse_shapes" type: DT_INT64 number_attr: "Nsparse"
                                +}
                                +output_arg { name: "dense_values" type_list_attr: "Tdense" }
                                +attr { name: "Nsparse" type: "int" has_minimum: true }
                                +attr { name: "Ndense" type: "int" has_minimum: true }
                                +attr {
                                +  name: "sparse_types"
                                +  type: "list(type)"
                                +  description: "A list of Nsparse types; the data types of data in each Feature\ngiven in sparse_keys.\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)."
                                +  has_minimum: true
                                +  allowed_values {
                                +    list { type: DT_FLOAT type: DT_INT64 type: DT_STRING }
                                +  }
                                +}
                                +attr {
                                +  name: "Tdense"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  allowed_values {
                                +    list { type: DT_FLOAT type: DT_INT64 type: DT_STRING }
                                +  }
                                +}
                                +attr {
                                +  name: "dense_shapes"
                                +  type: "list(shape)"
                                +  description: "A list of Ndense shapes; the shapes of data in each Feature\ngiven in dense_keys.\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch.\nThis works for dense_shapes[j] = (-1, D1, ..., DN).  In this case\nthe shape of the output Tensor dense_values[j] will be\n(|serialized|, M, D1, .., DN), where M is the maximum number of blocks\nof elements of length D1 * .... * DN, across all minibatch entries\nin the input.  Any minibatch entry with less than M blocks of elements of\nlength D1 * ... * DN will be padded with the corresponding default_value\nscalar element along the second dimension."
                                +  has_minimum: true
                                +}
                                +-}
                                +
                                 +-- | Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
                                 +--
                                 +-- NOTE(review): machine-generated TensorFlow op binding ("ParseSingleSequenceExample").
                                 +-- The result tuple pairs sparse (indices, values, shapes) triples with a dense
                                 +-- value list, first for the context section and then for the feature_list
                                 +-- section (see the per-field haddocks on the result below).
                                 +
                                 +parseSingleSequenceExample :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8
                                 +                              context_sparse_types tcontext_dense
                                 +                              feature_list_dense_types
                                 +                              feature_list_sparse_types . (OneOfs '[Data.ByteString.ByteString,
                                 +                                                                    Data.Int.Int64,
                                 +                                                                    Float] context_sparse_types,
                                 +                                                           OneOfs '[Data.ByteString.ByteString,
                                 +                                                                    Data.Int.Int64,
                                 +                                                                    Float] tcontext_dense,
                                 +                                                           OneOfs '[Data.ByteString.ByteString,
                                 +                                                                    Data.Int.Int64,
                                 +                                                                    Float] feature_list_dense_types,
                                 +                                                           OneOfs '[Data.ByteString.ByteString,
                                 +                                                                    Data.Int.Int64,
                                 +                                                                    Float] feature_list_sparse_types) =>
                                 +                              
                                 +                              Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A scalar containing a binary serialized SequenceExample proto.
                                 +                              -> Tensor v'2 Data.ByteString.ByteString -- ^ __feature_list_dense_missing_assumed_empty__: A vector listing the
                                 +                                                                       -- FeatureList keys which may be missing from the SequenceExample.  If the
                                 +                                                                       -- associated FeatureList is missing, it is treated as empty.  By default,
                                 +                                                                       -- any FeatureList not listed in this vector must exist in the SequenceExample.
                                 +                              -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __context_sparse_keys__: A list of Ncontext_sparse string Tensors (scalars).
                                 +                                                                         -- The keys expected in the Examples' features associated with context_sparse
                                 +                                                                         -- values.
                                 +                              -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __context_dense_keys__: A list of Ncontext_dense string Tensors (scalars).
                                 +                                                                         -- The keys expected in the SequenceExamples' context features associated with
                                 +                                                                         -- dense values.
                                 +                              -> [Tensor v'5 Data.ByteString.ByteString] -- ^ __feature_list_sparse_keys__: A list of Nfeature_list_sparse string Tensors
                                 +                                                                         -- (scalars).  The keys expected in the FeatureLists associated with sparse
                                 +                                                                         -- values.
                                 +                              -> [Tensor v'6 Data.ByteString.ByteString] -- ^ __feature_list_dense_keys__: A list of Nfeature_list_dense string Tensors (scalars).
                                 +                                                                         -- The keys expected in the SequenceExamples' feature_lists associated
                                 +                                                                         -- with lists of dense values.
                                 +                              -> TensorList (v'7) tcontext_dense -- ^ __context_dense_defaults__: A list of Ncontext_dense Tensors (some may be empty).
                                 +                                                                 -- context_dense_defaults[j] provides default values
                                 +                                                                 -- when the SequenceExample's context map lacks context_dense_key[j].
                                 +                                                                 -- If an empty Tensor is provided for context_dense_defaults[j],
                                 +                                                                 -- then the Feature context_dense_keys[j] is required.
                                 +                                                                 -- The input type is inferred from context_dense_defaults[j], even when it's
                                 +                                                                 -- empty.  If context_dense_defaults[j] is not empty, its shape must match
                                 +                                                                 -- context_dense_shapes[j].
                                 +                              -> Tensor v'8 Data.ByteString.ByteString -- ^ __debug_name__: A scalar containing the name of the serialized proto.
                                 +                                                                       -- May contain, for example, table key (descriptive) name for the
                                 +                                                                       -- corresponding serialized proto.  This is purely useful for debugging
                                 +                                                                       -- purposes, and the presence of values here has no effect on the output.
                                 +                                                                       -- May also be an empty scalar if no name is available.
                                 +                              -> ([Tensor Build Data.Int.Int64],
                                 +                                  TensorList (Build) context_sparse_types,
                                 +                                  [Tensor Build Data.Int.Int64],
                                 +                                  TensorList (Build) tcontext_dense,
                                 +                                  [Tensor Build Data.Int.Int64],
                                 +                                  TensorList (Build) feature_list_sparse_types,
                                 +                                  [Tensor Build Data.Int.Int64],
                                 +                                  TensorList (Build) feature_list_dense_types)
                                 +                              -- ^ (__context_sparse_indices__, __context_sparse_values__, __context_sparse_shapes__, __context_dense_values__, __feature_list_sparse_indices__, __feature_list_sparse_values__, __feature_list_sparse_shapes__, __feature_list_dense_values__)
                                 +                              --
                                 +                              -- * __context_sparse_indices__
                                 +                              --
                                 +                              -- * __context_sparse_values__
                                 +                              --
                                 +                              -- * __context_sparse_shapes__
                                 +                              --
                                 +                              -- * __context_dense_values__
                                 +                              --
                                 +                              -- * __feature_list_sparse_indices__
                                 +                              --
                                 +                              -- * __feature_list_sparse_values__
                                 +                              --
                                 +                              -- * __feature_list_sparse_shapes__
                                 +                              --
                                 +                              -- * __feature_list_dense_values__
                                 +-- Default-options form: forwards to the primed variant with 'id', i.e. no
                                 +-- extra OpParams are applied to the underlying OpDef.
                                 +parseSingleSequenceExample = parseSingleSequenceExample' id
                                 +-- | Variant of 'parseSingleSequenceExample' taking explicit 'OpParams'; the
                                 +-- params are composed into the generated OpDef below (via '& op'options').
                                 +parseSingleSequenceExample' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8
                                 +                               context_sparse_types tcontext_dense
                                 +                               feature_list_dense_types
                                 +                               feature_list_sparse_types . (OneOfs '[Data.ByteString.ByteString,
                                 +                                                                     Data.Int.Int64,
                                 +                                                                     Float] context_sparse_types,
                                 +                                                            OneOfs '[Data.ByteString.ByteString,
                                 +                                                                     Data.Int.Int64,
                                 +                                                                     Float] tcontext_dense,
                                 +                                                            OneOfs '[Data.ByteString.ByteString,
                                 +                                                                     Data.Int.Int64,
                                 +                                                                     Float] feature_list_dense_types,
                                 +                                                            OneOfs '[Data.ByteString.ByteString,
                                 +                                                                     Data.Int.Int64,
                                 +                                                                     Float] feature_list_sparse_types) =>
                                 +                               OpParams ->
                                 +                               Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A scalar containing a binary serialized SequenceExample proto.
                                 +                               -> Tensor v'2 Data.ByteString.ByteString -- ^ __feature_list_dense_missing_assumed_empty__: A vector listing the
                                 +                                                                        -- FeatureList keys which may be missing from the SequenceExample.  If the
                                 +                                                                        -- associated FeatureList is missing, it is treated as empty.  By default,
                                 +                                                                        -- any FeatureList not listed in this vector must exist in the SequenceExample.
                                 +                               -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __context_sparse_keys__: A list of Ncontext_sparse string Tensors (scalars).
                                 +                                                                          -- The keys expected in the Examples' features associated with context_sparse
                                 +                                                                          -- values.
                                 +                               -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __context_dense_keys__: A list of Ncontext_dense string Tensors (scalars).
                                 +                                                                          -- The keys expected in the SequenceExamples' context features associated with
                                 +                                                                          -- dense values.
                                 +                               -> [Tensor v'5 Data.ByteString.ByteString] -- ^ __feature_list_sparse_keys__: A list of Nfeature_list_sparse string Tensors
                                 +                                                                          -- (scalars).  The keys expected in the FeatureLists associated with sparse
                                 +                                                                          -- values.
                                 +                               -> [Tensor v'6 Data.ByteString.ByteString] -- ^ __feature_list_dense_keys__: A list of Nfeature_list_dense string Tensors (scalars).
                                 +                                                                          -- The keys expected in the SequenceExamples' feature_lists associated
                                 +                                                                          -- with lists of dense values.
                                 +                               -> TensorList (v'7) tcontext_dense -- ^ __context_dense_defaults__: A list of Ncontext_dense Tensors (some may be empty).
                                 +                                                                  -- context_dense_defaults[j] provides default values
                                 +                                                                  -- when the SequenceExample's context map lacks context_dense_key[j].
                                 +                                                                  -- If an empty Tensor is provided for context_dense_defaults[j],
                                 +                                                                  -- then the Feature context_dense_keys[j] is required.
                                 +                                                                  -- The input type is inferred from context_dense_defaults[j], even when it's
                                 +                                                                  -- empty.  If context_dense_defaults[j] is not empty, its shape must match
                                 +                                                                  -- context_dense_shapes[j].
                                 +                               -> Tensor v'8 Data.ByteString.ByteString -- ^ __debug_name__: A scalar containing the name of the serialized proto.
                                 +                                                                        -- May contain, for example, table key (descriptive) name for the
                                 +                                                                        -- corresponding serialized proto.  This is purely useful for debugging
                                 +                                                                        -- purposes, and the presence of values here has no effect on the output.
                                 +                                                                        -- May also be an empty scalar if no name is available.
                                 +                               -> ([Tensor Build Data.Int.Int64],
                                 +                                   TensorList (Build) context_sparse_types,
                                 +                                   [Tensor Build Data.Int.Int64],
                                 +                                   TensorList (Build) tcontext_dense,
                                 +                                   [Tensor Build Data.Int.Int64],
                                 +                                   TensorList (Build) feature_list_sparse_types,
                                 +                                   [Tensor Build Data.Int.Int64],
                                 +                                   TensorList (Build) feature_list_dense_types)
                                 +                               -- ^ (__context_sparse_indices__, __context_sparse_values__, __context_sparse_shapes__, __context_dense_values__, __feature_list_sparse_indices__, __feature_list_sparse_values__, __feature_list_sparse_shapes__, __feature_list_dense_values__)
                                 +                               --
                                 +                               -- * __context_sparse_indices__
                                 +                               --
                                 +                               -- * __context_sparse_values__
                                 +                               --
                                 +                               -- * __context_sparse_shapes__
                                 +                               --
                                 +                               -- * __context_dense_values__
                                 +                               --
                                 +                               -- * __feature_list_sparse_indices__
                                 +                               --
                                 +                               -- * __feature_list_sparse_values__
                                 +                               --
                                 +                               -- * __feature_list_sparse_shapes__
                                 +                               --
                                 +                               -- * __feature_list_dense_values__
                                 +-- The eqLengthGuard below cross-checks each N* count attribute against the
                                 +-- length of the key-list argument it is derived from (see the where clause).
                                 +parseSingleSequenceExample' op'options serialized
                                 +                            feature_list_dense_missing_assumed_empty
                                 +                            context_sparse_keys context_dense_keys
                                 +                            feature_list_sparse_keys feature_list_dense_keys
                                 +                            context_dense_defaults
                                 +                            debug_name | eqLengthGuard [("Ncontext_sparse", [("context_sparse_keys", length context_sparse_keys)]),
                                 +                                                        ("Ncontext_dense", [("context_dense_keys", length context_dense_keys)]),
                                 +                                                        ("Nfeature_list_sparse", [("feature_list_sparse_keys", length feature_list_sparse_keys)]),
                                 +                                                        ("Nfeature_list_dense", [("feature_list_dense_keys", length feature_list_dense_keys)])] =
                                 +    -- NOTE(review): the list passed to pureOp appears to size the four
                                 +    -- [Tensor Build Int64] outputs (context/feature_list sparse indices and
                                 +    -- shapes, hence each count twice) -- confirm against pureOp's contract.
                                 +    pureOp [ncontext_sparse, ncontext_sparse, nfeature_list_sparse,
                                 +            nfeature_list_sparse] $ do
                                 +        -- All eight inputs are flattened, in op order, into one input list.
                                 +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized,
                                 +                                                             buildInputs feature_list_dense_missing_assumed_empty,
                                 +                                                             buildInputs context_sparse_keys,
                                 +                                                             buildInputs context_dense_keys,
                                 +                                                             buildInputs feature_list_sparse_keys,
                                 +                                                             buildInputs feature_list_dense_keys,
                                 +                                                             buildInputs context_dense_defaults,
                                 +                                                             buildInputs debug_name]
                                 +        -- Type-list attrs come from the type-level OneOfs instantiations;
                                 +        -- count attrs come from the where-clause lengths.
                                 +        return (opDef "ParseSingleSequenceExample"
                                 +                & opAttr "context_sparse_types" .~ fromTensorTypes (Proxy :: Proxy context_sparse_types)
                                 +                & opAttr "Tcontext_dense" .~ fromTensorTypes (Proxy :: Proxy tcontext_dense)
                                 +                & opAttr "feature_list_dense_types" .~ fromTensorTypes (Proxy :: Proxy feature_list_dense_types)
                                 +                & opAttr "feature_list_sparse_types" .~ fromTensorTypes (Proxy :: Proxy feature_list_sparse_types)
                                 +                & opAttr "Ncontext_sparse" .~ ncontext_sparse
                                 +                & opAttr "Ncontext_dense" .~ ncontext_dense
                                 +                & opAttr "Nfeature_list_sparse" .~ nfeature_list_sparse
                                 +                & opAttr "Nfeature_list_dense" .~ nfeature_list_dense
                                 +                & op'options & opInputs .~ op'inputs)
                                 +  where
                                 +    -- Counts for the N* attributes, taken from the key-list lengths.
                                 +    ncontext_sparse = fromIntegral (length context_sparse_keys) :: Int64
                                 +    ncontext_dense = fromIntegral (length context_dense_keys) :: Int64
                                 +    nfeature_list_sparse = fromIntegral (length feature_list_sparse_keys) :: Int64
                                 +    nfeature_list_dense = fromIntegral (length feature_list_dense_keys) :: Int64
                                +{-
                                +input_arg {
                                +  name: "serialized"
                                +  description: "A scalar containing a binary serialized SequenceExample proto."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "feature_list_dense_missing_assumed_empty"
                                +  description: "A vector listing the\nFeatureList keys which may be missing from the SequenceExample.  If the\nassociated FeatureList is missing, it is treated as empty.  By default,\nany FeatureList not listed in this vector must exist in the SequenceExample."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "context_sparse_keys"
                                +  description: "A list of Ncontext_sparse string Tensors (scalars).\nThe keys expected in the Examples\' features associated with context_sparse\nvalues."
                                +  type: DT_STRING
                                +  number_attr: "Ncontext_sparse"
                                +}
                                +input_arg {
                                +  name: "context_dense_keys"
                                +  description: "A list of Ncontext_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples\' context features associated with\ndense values."
                                +  type: DT_STRING
                                +  number_attr: "Ncontext_dense"
                                +}
                                +input_arg {
                                +  name: "feature_list_sparse_keys"
                                +  description: "A list of Nfeature_list_sparse string Tensors\n(scalars).  The keys expected in the FeatureLists associated with sparse\nvalues."
                                +  type: DT_STRING
                                +  number_attr: "Nfeature_list_sparse"
                                +}
                                +input_arg {
                                +  name: "feature_list_dense_keys"
                                +  description: "A list of Nfeature_list_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples\' feature_lists associated\nwith lists of dense values."
                                +  type: DT_STRING
                                +  number_attr: "Nfeature_list_dense"
                                +}
                                +input_arg {
                                +  name: "context_dense_defaults"
                                +  description: "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample\'s context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it\'s\nempty.  If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j]."
                                +  type_list_attr: "Tcontext_dense"
                                +}
                                +input_arg {
                                +  name: "debug_name"
                                +  description: "A scalar containing the name of the serialized proto.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto.  This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty scalar if no name is available."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "context_sparse_indices"
                                +  type: DT_INT64
                                +  number_attr: "Ncontext_sparse"
                                +}
                                +output_arg {
                                +  name: "context_sparse_values"
                                +  type_list_attr: "context_sparse_types"
                                +}
                                +output_arg {
                                +  name: "context_sparse_shapes"
                                +  type: DT_INT64
                                +  number_attr: "Ncontext_sparse"
                                +}
                                +output_arg {
                                +  name: "context_dense_values" type_list_attr: "Tcontext_dense"
                                +}
                                +output_arg {
                                +  name: "feature_list_sparse_indices"
                                +  type: DT_INT64
                                +  number_attr: "Nfeature_list_sparse"
                                +}
                                +output_arg {
                                +  name: "feature_list_sparse_values"
                                +  type_list_attr: "feature_list_sparse_types"
                                +}
                                +output_arg {
                                +  name: "feature_list_sparse_shapes"
                                +  type: DT_INT64
                                +  number_attr: "Nfeature_list_sparse"
                                +}
                                +output_arg {
                                +  name: "feature_list_dense_values"
                                +  type_list_attr: "feature_list_dense_types"
                                +}
                                +attr {
                                +  name: "Ncontext_sparse"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "Ncontext_dense"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "Nfeature_list_sparse"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "Nfeature_list_dense"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "context_sparse_types"
                                +  type: "list(type)"
                                +  default_value { list { } }
                                +  description: "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)."
                                +  has_minimum: true
                                +  allowed_values {
                                +    list { type: DT_FLOAT type: DT_INT64 type: DT_STRING }
                                +  }
                                +}
                                +attr {
                                +  name: "Tcontext_dense"
                                +  type: "list(type)"
                                +  default_value { list { } }
                                +  has_minimum: true
                                +  allowed_values {
                                +    list { type: DT_FLOAT type: DT_INT64 type: DT_STRING }
                                +  }
                                +}
                                +attr {
                                +  name: "feature_list_dense_types"
                                +  type: "list(type)"
                                +  default_value { list { } }
                                +  has_minimum: true
                                +  allowed_values {
                                +    list { type: DT_FLOAT type: DT_INT64 type: DT_STRING }
                                +  }
                                +}
                                +attr {
                                +  name: "context_dense_shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j]."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "feature_list_sparse_types"
                                +  type: "list(type)"
                                +  default_value { list { } }
                                +  description: "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)."
                                +  has_minimum: true
                                +  allowed_values {
                                +    list { type: DT_FLOAT type: DT_INT64 type: DT_STRING }
                                +  }
                                +}
                                +attr {
                                +  name: "feature_list_dense_shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries()."
                                +  has_minimum: true
                                +}
                                +-}
                                +
                                +-- | Transforms a serialized tensorflow.TensorProto proto into a Tensor.
                                +
                                +parseTensor :: forall v'1 out_type . (TensorType out_type) => 
                                +               Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A scalar string containing a serialized TensorProto proto.
                                +               -> Tensor Build out_type -- ^ __output__: A Tensor of type `out_type`.
                                +parseTensor = parseTensor' id
                                +parseTensor' :: forall v'1 out_type . (TensorType out_type) => OpParams ->
                                +                Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A scalar string containing a serialized TensorProto proto.
                                +                -> Tensor Build out_type -- ^ __output__: A Tensor of type `out_type`.
                                +parseTensor' op'options serialized | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized]
                                +        return (opDef "ParseTensor"
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "serialized"
                                +  description: "A scalar string containing a serialized TensorProto proto."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A Tensor of type `out_type`."
                                +  type_attr: "out_type"
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  description: "The type of the serialized tensor.  The provided type must match the\ntype of the serialized tensor and no implicit conversion will take place."
                                +}
                                +-}
                                +
                                +-- | A placeholder op for a value that will be fed into the computation.
                                +--
                                +-- N.B. This operation will fail with an error if it is executed. It is
                                +-- intended as a way to represent a value that will always be fed, and to
                                +-- provide attrs that enable the fed value to be checked at runtime.
                                +placeholder :: forall dtype . (TensorType dtype) => 
                                +               Tensor Build dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.
                                +placeholder = placeholder' id
                                +placeholder' :: forall dtype . (TensorType dtype) => OpParams ->
                                +                Tensor Build dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.
                                +placeholder' op'options | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        return (opDef "Placeholder"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "output"
                                +  description: "A placeholder tensor that must be replaced using the feed mechanism."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of elements in the tensor."
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +  description: "(Optional) The shape of the tensor. If the shape has 0 dimensions, the\nshape is unconstrained."
                                +}
                                +-}
                                +
                                +-- | A placeholder op for a value that will be fed into the computation.
                                +--
                                +-- N.B. This operation will fail with an error if it is executed. It is
                                +-- intended as a way to represent a value that will always be fed, and to
                                +-- provide attrs that enable the fed value to be checked at runtime.
                                +placeholderV2 :: forall dtype . (TensorType dtype) => 
                                +                 Shape -- ^ __shape__: The shape of the tensor. The shape can be any partially-specified
                                +                       -- shape.  To be unconstrained, pass in a shape with unknown rank.
                                +                 -> Tensor Build dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.
                                +placeholderV2 = placeholderV2' id
                                +placeholderV2' :: forall dtype . (TensorType dtype) => OpParams ->
                                +                  Shape -- ^ __shape__: The shape of the tensor. The shape can be any partially-specified
                                +                        -- shape.  To be unconstrained, pass in a shape with unknown rank.
                                +                  -> Tensor Build dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.
                                +placeholderV2' op'options shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        return (opDef "PlaceholderV2"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & opAttr "shape" .~ shape
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "output"
                                +  description: "A placeholder tensor that must be replaced using the feed mechanism."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of elements in the tensor."
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "The shape of the tensor. The shape can be any partially-specified\nshape.  To be unconstrained, pass in a shape with unknown rank."
                                +}
                                +-}
                                +
                                +-- | A placeholder op that passes through `input` when its output is not fed.
                                +
                                +placeholderWithDefault :: forall v'1 dtype . (TensorType dtype) => 
                                +                          Shape -- ^ __shape__: The (possibly partial) shape of the tensor.
                                +                          -> Tensor v'1 dtype -- ^ __input__: The default value to produce when `output` is not fed.
                                +                          -> Tensor Build dtype -- ^ __output__: A placeholder tensor that defaults to `input` if it is not fed.
                                +placeholderWithDefault = placeholderWithDefault' id
                                +placeholderWithDefault' :: forall v'1 dtype . (TensorType dtype) => OpParams ->
                                +                           Shape -- ^ __shape__: The (possibly partial) shape of the tensor.
                                +                           -> Tensor v'1 dtype -- ^ __input__: The default value to produce when `output` is not fed.
                                +                           -> Tensor Build dtype -- ^ __output__: A placeholder tensor that defaults to `input` if it is not fed.
                                +placeholderWithDefault' op'options shape input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "PlaceholderWithDefault"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & opAttr "shape" .~ shape
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "The default value to produce when `output` is not fed."
                                +  type_attr: "dtype"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A placeholder tensor that defaults to `input` if it is not fed."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of elements in the tensor."
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "The (possibly partial) shape of the tensor."
                                +}
                                +-}
                                +
                                +-- | Compute the polygamma function \\(\psi^{(n)}(x)\\).
                                +--
                                +-- The polygamma function is defined as:
                                +-- 
                                +-- 
                                +-- \\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
                                +-- 
                                +-- where \\(\psi(x)\\) is the digamma function.
                                +polygamma :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +             Tensor v'1 t -- ^ __a__
                                +             -> Tensor v'2 t -- ^ __x__
                                +             -> Tensor Build t -- ^ __z__
                                +polygamma = polygamma' id
                                +polygamma' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __a__
                                +              -> Tensor v'2 t -- ^ __x__
                                +              -> Tensor Build t -- ^ __z__
                                +polygamma' op'options a x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a,
                                +                                                             buildInputs x]
                                +        return (opDef "Polygamma"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "a" type_attr: "T" }
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Computes the power of one value to another.
                                +--
                                +-- Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
                                +-- corresponding elements in `x` and `y`. For example:
                                +-- 
                                +-- ```
                                +-- # tensor 'x' is [[2, 2]], [3, 3]]
                                +-- # tensor 'y' is [[8, 16], [2, 3]]
                                +-- tf.pow(x, y) ==> [[256, 65536], [9, 27]]
                                +-- ```
                                +pow :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                   Data.Int.Int64, Data.Word.Word16, Double,
                                +                                   Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor v'2 t -- ^ __y__
                                +       -> Tensor Build t -- ^ __z__
                                +pow = pow' id
                                +pow' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int32, Data.Int.Int64,
                                +                                    Data.Word.Word16, Double, Float] t) =>
                                +        OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor v'2 t -- ^ __y__
                                +        -> Tensor Build t -- ^ __z__
                                +pow' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Pow"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | An identity op that triggers an error if a gradient is requested.
                                +--
                                +-- When executed in a graph, this op outputs its input tensor as-is.
                                +-- 
                                +-- When building ops to compute gradients, the TensorFlow gradient system
                                +-- will return an error when trying to lookup the gradient of this op,
                                +-- because no gradient must ever be registered for this function.  This
                                +-- op exists to prevent subtle bugs from silently returning unimplemented
                                +-- gradients in some corner cases.
                                +preventGradient :: forall v'1 t . (TensorType t) => 
                                +                   Tensor v'1 t -- ^ __input__: any tensor.
                                +                   -> Tensor Build t -- ^ __output__: the same input tensor.
                                +preventGradient = preventGradient' id
                                +preventGradient' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                    Tensor v'1 t -- ^ __input__: any tensor.
                                +                    -> Tensor Build t -- ^ __output__: the same input tensor.
                                +preventGradient' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "PreventGradient"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "any tensor." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output" description: "the same input tensor." type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "message"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "Will be printed in the error when anyone tries to differentiate\nthis operation."
                                +}
                                +-}
                                +
                                +-- | Prints a list of tensors.
                                +--
                                +-- Passes `input` through to `output` and prints `data` when evaluating.
                                +print :: forall v'1 v'2 t u m' . (MonadBuild m', TensorType t, TensorTypes u) =>
                                +         
                                +         Tensor v'1 t -- ^ __input__: The tensor passed to `output`
                                +         -> TensorList (v'2) u -- ^ __data__: A list of tensors to print out when op is evaluated.
                                +         -> m' (Tensor Value t) -- ^ __output__: = The unmodified `input` tensor
                                +print = print' id
                                +print' :: forall v'1 v'2 t u m' . (MonadBuild m', TensorType t,
                                +                                   TensorTypes u) => OpParams ->
                                +          Tensor v'1 t -- ^ __input__: The tensor passed to `output`
                                +          -> TensorList (v'2) u -- ^ __data__: A list of tensors to print out when op is evaluated.
                                +          -> m' (Tensor Value t) -- ^ __output__: = The unmodified `input` tensor
                                +print' op'options input data' | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs data']
                                +        buildOp [] (opDef "Print"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "U" .~ fromTensorTypes (Proxy :: Proxy u)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "The tensor passed to `output`"
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "data"
                                +  description: "A list of tensors to print out when op is evaluated."
                                +  type_list_attr: "U"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "= The unmodified `input` tensor"
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr { name: "U" type: "list(type)" has_minimum: true minimum: 1 }
                                +attr {
                                +  name: "message"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "A string, prefix of the error message."
                                +}
                                +attr {
                                +  name: "first_n"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Only log `first_n` number of times. -1 disables logging."
                                +}
                                +attr {
                                +  name: "summarize"
                                +  type: "int"
                                +  default_value { i: 3 }
                                +  description: "Only print this many entries of each tensor."
                                +}
                                +-}
                                +
                                +-- | A queue that produces elements sorted by the first component value.
                                +--
                                +-- Note that the PriorityQueue requires the first component of any element
                                +-- to be a scalar int64, in addition to the other elements declared by
                                +-- component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
                                +-- and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
                                +-- entry in their input (resp. output) lists.
                                +priorityQueue :: forall m' . (MonadBuild m') => 
                                +                 m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
                                +priorityQueue = priorityQueue' id
                                +priorityQueue' :: forall m' . (MonadBuild m') => OpParams ->
                                +                  m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
                                +priorityQueue' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "PriorityQueue"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  default_value { list { } }
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | A queue that produces elements sorted by the first component value.
                                +--
                                +-- Note that the PriorityQueue requires the first component of any element
                                +-- to be a scalar int64, in addition to the other elements declared by
                                +-- component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
                                +-- and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
                                +-- entry in their input (resp. output) lists.
                                +priorityQueueV2 :: forall m' . (MonadBuild m') => 
                                +                   m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue.
                                +priorityQueueV2 = priorityQueueV2' id
                                +priorityQueueV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                    m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue.
                                +priorityQueueV2' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "PriorityQueueV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the queue."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  default_value { list { } }
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | Computes the product of elements across dimensions of a tensor.
                                +--
                                +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
                                +-- retained with length 1.
                                +prod :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t,
                                +                                 OneOf '[Data.Int.Int32,
                                +                                         Data.Int.Int64] tidx) => 
                                +        Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +        -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +        -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +prod = prod' id
                                +prod' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                          (Data.Complex.Complex Float),
                                +                                          Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t,
                                +                                  OneOf '[Data.Int.Int32,
                                +                                          Data.Int.Int64] tidx) => OpParams ->
                                +         Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +         -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +         -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +prod' op'options input reduction_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs reduction_indices]
                                +        return (opDef "Prod"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The tensor to reduce." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "reduction_indices"
                                +  description: "The dimensions to reduce."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output" description: "The reduced tensor." type_attr: "T"
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the QR decompositions of one or more matrices.
                                +--
                                +-- Computes the QR decomposition of each inner matrix in `tensor` such that
                                +-- `tensor[..., :, :] = q[..., :, :] * r[..., :,:])`
                                +-- 
                                +-- ```python
                                +-- # a is a tensor.
                                +-- # q is a tensor of orthonormal matrices.
                                +-- # r is a tensor of upper triangular matrices.
                                +-- q, r = qr(a)
                                +-- q_full, r_full = qr(a, full_matrices=True)
                                +-- ```
                                +qr :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                              (Data.Complex.Complex Float), Double, Float] t) =>
                                +      
                                +      Tensor v'1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
                                +                   -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
                                +      -> (Tensor Build t, Tensor Build t) -- ^ (__q__, __r__)
                                +      --
                                +      -- * __q__: Orthonormal basis for range of `a`. If `full_matrices` is `False` then
                                +      -- shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
                                +      -- `[..., M, M]`.
                                +      --
                                +      -- * __r__: Triangular factor. If `full_matrices` is `False` then shape is
                                +      -- `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
                                +qr = qr' id
                                +qr' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Double,
                                +                               Float] t) => OpParams ->
                                +       Tensor v'1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
                                +                    -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
                                +       -> (Tensor Build t, Tensor Build t) -- ^ (__q__, __r__)
                                +       --
                                +       -- * __q__: Orthonormal basis for range of `a`. If `full_matrices` is `False` then
                                +       -- shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
                                +       -- `[..., M, M]`.
                                +       --
                                +       -- * __r__: Triangular factor. If `full_matrices` is `False` then shape is
                                +       -- `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
                                +qr' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Qr"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "q"
                                +  description: "Orthonormal basis for range of `a`. If `full_matrices` is `False` then\nshape is `[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "r"
                                +  description: "Triangular factor. If `full_matrices` is `False` then shape is\n`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "full_matrices"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, compute full-sized `q` and `r`. If false\n(the default), compute only the leading `P` columns of `q`."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_DOUBLE
                                +      type: DT_FLOAT
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Use QuantizeAndDequantizeV2 instead.
                                +
                                +quantizeAndDequantize :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +                         Tensor v'1 t -- ^ __input__
                                +                         -> Tensor Build t -- ^ __output__
                                +quantizeAndDequantize = quantizeAndDequantize' id
                                +quantizeAndDequantize' :: forall v'1 t . (OneOf '[Double, Float] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 t -- ^ __input__
                                +                          -> Tensor Build t -- ^ __output__
                                +quantizeAndDequantize' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "QuantizeAndDequantize"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "signed_input" type: "bool" default_value { b: true }
                                +}
                                +attr { name: "num_bits" type: "int" default_value { i: 8 } }
                                +attr {
                                +  name: "range_given" type: "bool" default_value { b: false }
                                +}
                                +attr { name: "input_min" type: "float" default_value { f: 0.0 } }
                                +attr { name: "input_max" type: "float" default_value { f: 0.0 } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Quantizes then dequantizes a tensor.
                                +--
                                +-- This op simulates the precision loss from the quantized forward pass by:
                                +-- 1. Quantizing the tensor to fixed point numbers, which should match the target
                                +--    quantization method when it is used in inference.
                                +-- 2. Dequantizing it back to floating point numbers for the following ops, most
                                +--    likely matmul.
                                +-- 
                                +-- There are different ways to quantize. This version does not use the full range
                                +-- of the output type, choosing to elide the lowest possible value for symmetry
                                +-- (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
                                +-- quantization), so that 0.0 maps to 0.
                                +-- 
                                +-- To perform this op, we first find the range of values in our tensor. The range
                                +-- we use is always centered on 0, so we find m such that
                                +-- 
                                +-- 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
                                +-- 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
                                +-- 
                                +-- Our input tensor range is then [-m, m].
                                +-- 
                                +-- Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
                                +-- If signed_input is true, this is
                                +-- 
                                +--   [min_fixed, max_fixed ] =
                                +--       [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1].
                                +-- 
                                +-- Otherwise, if signed_input is false, the fixed-point range is
                                +-- 
                                +--   [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
                                +-- 
                                +-- From this we compute our scaling factor, s:
                                +-- 
                                +--   s = (max_fixed - min_fixed) / (2 * m).
                                +-- 
                                +-- Now we can quantize and dequantize the elements of our tensor.  An element e
                                +-- is transformed into e':
                                +-- 
                                +--   e' = (e * s).round_to_nearest() / s.
                                +-- 
                                +-- Note that we have a different number of buckets in the signed vs. unsigned
                                +-- cases.  For example, if num_bits == 8, we get 254 buckets in the signed case
                                +-- vs. 255 in the unsigned case.
                                +-- 
                                +-- For example, suppose num_bits = 8 and m = 1.  Then
                                +-- 
                                +--   [min_fixed, max_fixed] = [-127, 127], and
                                +--   s = (127 + 127) / 2 = 127.
                                +-- 
                                +-- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
                                +-- {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
                                +quantizeAndDequantizeV2 :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +                           Tensor v'1 t -- ^ __input__: Tensor to quantize and then dequantize.
                                +                           -> Tensor v'2 t -- ^ __input_min__: If range_given, this is the min of the range, otherwise this input
                                +                                           -- will be ignored.
                                +                           -> Tensor v'3 t -- ^ __input_max__: If range_given, this is the max of the range, otherwise this input
                                +                                           -- will be ignored.
                                +                           -> Tensor Build t -- ^ __output__
                                +quantizeAndDequantizeV2 = quantizeAndDequantizeV2' id
                                +quantizeAndDequantizeV2' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) =>
                                +                            OpParams ->
                                +                            Tensor v'1 t -- ^ __input__: Tensor to quantize and then dequantize.
                                +                            -> Tensor v'2 t -- ^ __input_min__: If range_given, this is the min of the range, otherwise this input
                                +                                            -- will be ignored.
                                +                            -> Tensor v'3 t -- ^ __input_max__: If range_given, this is the max of the range, otherwise this input
                                +                                            -- will be ignored.
                                +                            -> Tensor Build t -- ^ __output__
                                +quantizeAndDequantizeV2' op'options input input_min
                                +                         input_max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs input_min,
                                +                                                             buildInputs input_max]
                                +        return (opDef "QuantizeAndDequantizeV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "Tensor to quantize and then dequantize."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "input_min"
                                +  description: "If range_given, this is the min of the range, otherwise this input\nwill be ignored."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "input_max"
                                +  description: "If range_given, this is the max of the range, otherwise this input\nwill be ignored."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "signed_input"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If the quantization is signed or unsigned."
                                +}
                                +attr {
                                +  name: "num_bits"
                                +  type: "int"
                                +  default_value { i: 8 }
                                +  description: "The bitwidth of the quantization."
                                +}
                                +attr {
                                +  name: "range_given"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If the range is given or should be computed from the tensor."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Quantizes then dequantizes a tensor.
                                +--
                                +-- This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
                                +-- tensor, so its value can change during training.
                                +quantizeAndDequantizeV3 :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Double,
                                +                                                               Float] t) => 
                                +                           Tensor v'1 t -- ^ __input__
                                +                           -> Tensor v'2 t -- ^ __input_min__
                                +                           -> Tensor v'3 t -- ^ __input_max__
                                +                           -> Tensor v'4 Data.Int.Int32 -- ^ __num_bits__
                                +                           -> Tensor Build t -- ^ __output__
                                +quantizeAndDequantizeV3 = quantizeAndDequantizeV3' id
                                +quantizeAndDequantizeV3' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Double,
                                +                                                                Float] t) =>
                                +                            OpParams ->
                                +                            Tensor v'1 t -- ^ __input__
                                +                            -> Tensor v'2 t -- ^ __input_min__
                                +                            -> Tensor v'3 t -- ^ __input_max__
                                +                            -> Tensor v'4 Data.Int.Int32 -- ^ __num_bits__
                                +                            -> Tensor Build t -- ^ __output__
                                +quantizeAndDequantizeV3' op'options input input_min input_max
                                +                         num_bits | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs input_min,
                                +                                                             buildInputs input_max,
                                +                                                             buildInputs num_bits]
                                +        return (opDef "QuantizeAndDequantizeV3"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg { name: "input_min" type_attr: "T" }
                                +input_arg { name: "input_max" type_attr: "T" }
                                +input_arg { name: "num_bits" type: DT_INT32 }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "signed_input" type: "bool" default_value { b: true }
                                +}
                                +attr { name: "range_given" type: "bool" default_value { b: true } }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Convert the quantized 'input' tensor into a lower-precision 'output', using the
                                +--
                                +-- actual distribution of the values to maximize the usage of the lower bit depth
                                +-- and adjusting the output min and max ranges accordingly.
                                +-- 
                                +-- [input_min, input_max] are scalar floats that specify the range for the float
                                +-- interpretation of the 'input' data. For example, if input_min is -1.0f and
                                +-- input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
                                +-- value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
                                +-- 
                                +-- This operator tries to squeeze as much precision as possible into an output with
                                +-- a lower bit depth by calculating the actual min and max values found in the
                                +-- data. For example, maybe that quint16 input has no values lower than 16,384 and
                                +-- none higher than 49,152. That means only half the range is actually needed, all
                                +-- the float interpretations are between -0.5f and 0.5f, so if we want to compress
                                +-- the data into a quint8 output, we can use that range rather than the theoretical
                                +-- -1.0f to 1.0f that is suggested by the input min and max.
                                +-- 
                                +-- In practice, this is most useful for taking output from operations like
                                +-- QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
                                +-- may have large potential output ranges, but in practice have a distribution of
                                +-- input values that only uses a small fraction of the possible range. By feeding
                                +-- that output into this operator, we can reduce it from 32 bits down to 8 with
                                +-- minimal loss of accuracy.
                                +quantizeDownAndShrinkRange :: forall v'1 v'2 v'3 tinput
                                +                              out_type . (OneOf '[Data.Int.Int16,
                                +                                                  Data.Int.Int32,
                                +                                                  Data.Word.Word16,
                                +                                                  Data.Word.Word8] tinput,
                                +                                          OneOf '[Data.Int.Int16,
                                +                                                  Data.Int.Int32,
                                +                                                  Data.Word.Word16,
                                +                                                  Data.Word.Word8] out_type) => 
                                +                              Tensor v'1 tinput -- ^ __input__
                                +                              -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
                                +                              -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
                                +                              -> (Tensor Build out_type, Tensor Build Float,
                                +                                  Tensor Build Float)
                                +                              -- ^ (__output__, __output_min__, __output_max__)
                                +                              --
                                +                              -- * __output__
                                +                              --
                                +                              -- * __output_min__: The float value that the minimum quantized output value represents.
                                +                              --
                                +                              -- * __output_max__: The float value that the maximum quantized output value represents.
                                +quantizeDownAndShrinkRange = quantizeDownAndShrinkRange' id
                                +quantizeDownAndShrinkRange' :: forall v'1 v'2 v'3 tinput
                                +                               out_type . (OneOf '[Data.Int.Int16,
                                +                                                   Data.Int.Int32,
                                +                                                   Data.Word.Word16,
                                +                                                   Data.Word.Word8] tinput,
                                +                                           OneOf '[Data.Int.Int16,
                                +                                                   Data.Int.Int32,
                                +                                                   Data.Word.Word16,
                                +                                                   Data.Word.Word8] out_type) =>
                                +                               OpParams ->
                                +                               Tensor v'1 tinput -- ^ __input__
                                +                               -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
                                +                               -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
                                +                               -> (Tensor Build out_type, Tensor Build Float,
                                +                                   Tensor Build Float)
                                +                               -- ^ (__output__, __output_min__, __output_max__)
                                +                               --
                                +                               -- * __output__
                                +                               --
                                +                               -- * __output_min__: The float value that the minimum quantized output value represents.
                                +                               --
                                +                               -- * __output_max__: The float value that the maximum quantized output value represents.
                                +quantizeDownAndShrinkRange' op'options input input_min
                                +                            input_max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs input_min,
                                +                                                             buildInputs input_max]
                                +        return (opDef "QuantizeDownAndShrinkRange"
                                +                & opAttr "Tinput" .~ tensorType (undefined :: tinput)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "Tinput" }
                                +input_arg {
                                +  name: "input_min"
                                +  description: "The float value that the minimum quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "input_max"
                                +  description: "The float value that the maximum quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "output" type_attr: "out_type" }
                                +output_arg {
                                +  name: "output_min"
                                +  description: "The float value that the minimum quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output_max"
                                +  description: "The float value that the maximum quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "Tinput"
                                +  type: "type"
                                +  description: "The type of the input."
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  description: "The type of the output. Should be a lower bit depth than Tinput."
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
                                +--
                                +-- [min_range, max_range] are scalar floats that specify the range for
                                +-- the 'input' data. The 'mode' attribute controls exactly which calculations are
                                +-- used to convert the float values to their quantized equivalents.
                                +-- 
                                +-- In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
                                +-- 
                                +-- ```
                                +-- out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
                                +-- if T == qint8, out[i] -= (range(T) + 1) / 2.0
                                +-- ```
                                +-- here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
                                +-- 
                                +-- *MIN_COMBINED Mode Example*
                                +-- 
                                +-- Assume the input is type float and has a possible range of [0.0, 6.0] and the
                                +-- output type is quint8 ([0, 255]). The min_range and max_range values should be
                                +-- specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
                                +-- value of the input by 255/6 and cast to quint8.
                                +-- 
                                +-- If the output type was qint8 ([-128, 127]), the operation will additionally
                                +-- subtract each value by 128 prior to casting, so that the range of values aligns
                                +-- with the range of qint8.
                                +-- 
                                +-- If the mode is 'MIN_FIRST', then this approach is used:
                                +-- 
                                +-- ```
                                +-- number_of_steps = 1 << (# of bits in T)
                                +-- range_adjust = number_of_steps / (number_of_steps - 1)
                                +-- range = (range_max - range_min) * range_adjust
                                +-- range_scale = number_of_steps / range
                                +-- quantized = round(input * range_scale) - round(range_min * range_scale) +
                                +--   numeric_limits<T>::min()
                                +-- quantized = max(quantized, numeric_limits<T>::min())
                                +-- quantized = min(quantized, numeric_limits<T>::max())
                                +-- ```
                                +-- 
                                +-- The biggest difference between this and MIN_COMBINED is that the minimum range
                                +-- is rounded first, before it's subtracted from the rounded value. With
                                +-- MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
                                +-- and dequantizing will introduce a larger and larger error.
                                +-- 
                                +-- One thing to watch out for is that the operator may choose to adjust the
                                +-- requested minimum and maximum values slightly during the quantization process,
                                +-- so you should always use the output ports as the range for further calculations.
                                +-- For example, if the requested minimum and maximum values are close to equal,
                                +-- they will be separated by a small epsilon value to prevent ill-formed quantized
                                +-- buffers from being created. Otherwise, you can end up with buffers where all the
                                +-- quantized values map to the same float value, which causes problems for
                                +-- operations that have to perform further calculations on them.
                                +quantizeV2 :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                              Data.Word.Word16,
                                +                                              Data.Word.Word8] t) => 
                                +              Tensor v'1 Float -- ^ __input__
                                +              -> Tensor v'2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input.
                                +              -> Tensor v'3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input.
                                +              -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +              -- ^ (__output__, __output_min__, __output_max__)
                                +              --
                                +              -- * __output__: The quantized data produced from the float input.
                                +              --
                                +              -- * __output_min__: The actual minimum scalar value used for the output.
                                +              --
                                +              -- * __output_max__: The actual maximum scalar value used for the output.
                                +quantizeV2 = quantizeV2' id
                                +quantizeV2' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8] t) =>
                                +               OpParams ->
                                +               Tensor v'1 Float -- ^ __input__
                                +               -> Tensor v'2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input.
                                +               -> Tensor v'3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input.
                                +               -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +               -- ^ (__output__, __output_min__, __output_max__)
                                +               --
                                +               -- * __output__: The quantized data produced from the float input.
                                +               --
                                +               -- * __output_min__: The actual minimum scalar value used for the output.
                                +               --
                                +               -- * __output_max__: The actual maximum scalar value used for the output.
                                +quantizeV2' op'options input min_range max_range | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs min_range,
                                +                                                             buildInputs max_range]
                                +        return (opDef "QuantizeV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type: DT_FLOAT }
                                +input_arg {
                                +  name: "min_range"
                                +  description: "The minimum scalar value possibly produced for the input."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_range"
                                +  description: "The maximum scalar value possibly produced for the input."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The quantized data produced from the float input."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_min"
                                +  description: "The actual minimum scalar value used for the output."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output_max"
                                +  description: "The actual maximum scalar value used for the output."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "mode"
                                +  type: "string"
                                +  default_value { s: "MIN_COMBINED" }
                                +  allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" } }
                                +}
                                +-}
                                +
                                +-- | Returns x + y element-wise, working on quantized buffers.
                                +
                                +quantizedAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2
                                +                toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Word.Word16, Data.Word.Word8] t1,
                                +                           OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Word.Word16, Data.Word.Word8] t2,
                                +                           OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Word.Word16,
                                +                                   Data.Word.Word8] toutput) => 
                                +                Tensor v'1 t1 -- ^ __x__
                                +                -> Tensor v'2 t2 -- ^ __y__
                                +                -> Tensor v'3 Float -- ^ __min_x__: The float value that the lowest quantized `x` value represents.
                                +                -> Tensor v'4 Float -- ^ __max_x__: The float value that the highest quantized `x` value represents.
                                +                -> Tensor v'5 Float -- ^ __min_y__: The float value that the lowest quantized `y` value represents.
                                +                -> Tensor v'6 Float -- ^ __max_y__: The float value that the highest quantized `y` value represents.
                                +                -> (Tensor Build toutput, Tensor Build Float,
                                +                    Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__)
                                +                --
                                +                -- * __z__
                                +                --
                                +                -- * __min_z__: The float value that the lowest quantized output value represents.
                                +                --
                                +                -- * __max_z__: The float value that the highest quantized output value represents.
                                +                -- 
                                +                -- *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
                                +                -- broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +quantizedAdd = quantizedAdd' id
                                +quantizedAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2
                                +                 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Word.Word16, Data.Word.Word8] t1,
                                +                            OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Word.Word16, Data.Word.Word8] t2,
                                +                            OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Word.Word16,
                                +                                    Data.Word.Word8] toutput) => OpParams ->
                                +                 Tensor v'1 t1 -- ^ __x__
                                +                 -> Tensor v'2 t2 -- ^ __y__
                                +                 -> Tensor v'3 Float -- ^ __min_x__: The float value that the lowest quantized `x` value represents.
                                +                 -> Tensor v'4 Float -- ^ __max_x__: The float value that the highest quantized `x` value represents.
                                +                 -> Tensor v'5 Float -- ^ __min_y__: The float value that the lowest quantized `y` value represents.
                                +                 -> Tensor v'6 Float -- ^ __max_y__: The float value that the highest quantized `y` value represents.
                                +                 -> (Tensor Build toutput, Tensor Build Float,
                                +                     Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__)
                                +                 --
                                +                 -- * __z__
                                +                 --
                                +                 -- * __min_z__: The float value that the lowest quantized output value represents.
                                +                 --
                                +                 -- * __max_z__: The float value that the highest quantized output value represents.
                                +                 -- 
                                +                 -- *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
                                +                 -- broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +quantizedAdd' op'options x y min_x max_x min_y max_y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y,
                                +                                                             buildInputs min_x,
                                +                                                             buildInputs max_x,
                                +                                                             buildInputs min_y,
                                +                                                             buildInputs max_y]
                                +        return (opDef "QuantizedAdd"
                                +                & opAttr "T1" .~ tensorType (undefined :: t1)
                                +                & opAttr "T2" .~ tensorType (undefined :: t2)
                                +                & opAttr "Toutput" .~ tensorType (undefined :: toutput)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T1" }
                                +input_arg { name: "y" type_attr: "T2" }
                                +input_arg {
                                +  name: "min_x"
                                +  description: "The float value that the lowest quantized `x` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_x"
                                +  description: "The float value that the highest quantized `x` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "min_y"
                                +  description: "The float value that the lowest quantized `y` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_y"
                                +  description: "The float value that the highest quantized `y` value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "z" type_attr: "Toutput" }
                                +output_arg {
                                +  name: "min_z"
                                +  description: "The float value that the lowest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_z"
                                +  description: "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)"
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T1"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "T2"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Toutput"
                                +  type: "type"
                                +  default_value { type: DT_QINT32 }
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Produces the average pool of the input tensor for quantized types.
                                +
                                +quantizedAvgPool :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8] t) => 
                                +                    Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`.
                                +                    -> Tensor v'2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
                                +                    -> Tensor v'3 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
                                +                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +                    -- ^ (__output__, __min_output__, __max_output__)
                                +                    --
                                +                    -- * __output__
                                +                    --
                                +                    -- * __min_output__: The float value that the lowest quantized output value represents.
                                +                    --
                                +                    -- * __max_output__: The float value that the highest quantized output value represents.
                                +quantizedAvgPool = quantizedAvgPool' id
                                +quantizedAvgPool' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8] t) =>
                                +                     OpParams ->
                                +                     Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`.
                                +                     -> Tensor v'2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
                                +                     -> Tensor v'3 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
                                +                     -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +                     -- ^ (__output__, __min_output__, __max_output__)
                                +                     --
                                +                     -- * __output__
                                +                     --
                                +                     -- * __min_output__: The float value that the lowest quantized output value represents.
                                +                     --
                                +                     -- * __max_output__: The float value that the highest quantized output value represents.
                                +quantizedAvgPool' op'options input min_input max_input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs min_input,
                                +                                                             buildInputs max_input]
                                +        return (opDef "QuantizedAvgPool"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "min_input"
                                +  description: "The float value that the lowest quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_input"
                                +  description: "The float value that the highest quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +output_arg {
                                +  name: "min_output"
                                +  description: "The float value that the lowest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_output"
                                +  description: "The float value that the highest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input."
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input\ntensor.  The length must be 4 to match the number of dimensions of the input."
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Quantized Batch normalization.
                                +--
                                +-- This op is deprecated and will be removed in the future. Prefer
                                +-- `tf.nn.batch_normalization`.
                                +quantizedBatchNormWithGlobalNormalization :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7
                                +                                             v'8 v'9 v'10 v'11 v'12 v'13 v'14
                                +                                             v'15 tinput
                                +                                             out_type . (OneOf '[Data.Int.Int16,
                                +                                                                 Data.Int.Int32,
                                +                                                                 Data.Word.Word16,
                                +                                                                 Data.Word.Word8] tinput,
                                +                                                         OneOf '[Data.Int.Int16,
                                +                                                                 Data.Int.Int32,
                                +                                                                 Data.Word.Word16,
                                +                                                                 Data.Word.Word8] out_type) =>
                                +                                             
                                +                                             Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor
                                +                                                  -- needs to be multiplied with gamma.
                                +                                             -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
                                +                                             -> Tensor v'1 tinput -- ^ __t__: A 4D input Tensor.
                                +                                             -> Tensor v'2 Float -- ^ __t_min__: The value represented by the lowest quantized input.
                                +                                             -> Tensor v'3 Float -- ^ __t_max__: The value represented by the highest quantized input.
                                +                                             -> Tensor v'4 tinput -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
                                +                                                                  -- This is the first output from tf.nn.moments,
                                +                                                                  -- or a saved moving average thereof.
                                +                                             -> Tensor v'5 Float -- ^ __m_min__: The value represented by the lowest quantized mean.
                                +                                             -> Tensor v'6 Float -- ^ __m_max__: The value represented by the highest quantized mean.
                                +                                             -> Tensor v'7 tinput -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
                                +                                                                  -- This is the second output from tf.nn.moments,
                                +                                                                  -- or a saved moving average thereof.
                                +                                             -> Tensor v'8 Float -- ^ __v_min__: The value represented by the lowest quantized variance.
                                +                                             -> Tensor v'9 Float -- ^ __v_max__: The value represented by the highest quantized variance.
                                +                                             -> Tensor v'10 tinput -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.
                                +                                                                   -- An offset to be added to the normalized tensor.
                                +                                             -> Tensor v'11 Float -- ^ __beta_min__: The value represented by the lowest quantized offset.
                                +                                             -> Tensor v'12 Float -- ^ __beta_max__: The value represented by the highest quantized offset.
                                +                                             -> Tensor v'13 tinput -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
                                +                                                                   -- If "scale_after_normalization" is true, this tensor will be multiplied
                                +                                                                   -- with the normalized tensor.
                                +                                             -> Tensor v'14 Float -- ^ __gamma_min__: The value represented by the lowest quantized gamma.
                                +                                             -> Tensor v'15 Float -- ^ __gamma_max__: The value represented by the highest quantized gamma.
                                +                                             -> (Tensor Build out_type,
                                +                                                 Tensor Build Float,
                                +                                                 Tensor Build Float)
                                +                                             -- ^ (__result__, __result_min__, __result_max__)
                                +                                             --
                                +                                             -- * __result__
                                +                                             --
                                +                                             -- * __result_min__
                                +                                             --
                                +                                             -- * __result_max__
                                +quantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization' id
                                +quantizedBatchNormWithGlobalNormalization' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7
                                +                                              v'8 v'9 v'10 v'11 v'12 v'13 v'14
                                +                                              v'15 tinput
                                +                                              out_type . (OneOf '[Data.Int.Int16,
                                +                                                                  Data.Int.Int32,
                                +                                                                  Data.Word.Word16,
                                +                                                                  Data.Word.Word8] tinput,
                                +                                                          OneOf '[Data.Int.Int16,
                                +                                                                  Data.Int.Int32,
                                +                                                                  Data.Word.Word16,
                                +                                                                  Data.Word.Word8] out_type) =>
                                +                                              OpParams ->
                                +                                              Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor
                                +                                                   -- needs to be multiplied with gamma.
                                +                                              -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
                                +                                              -> Tensor v'1 tinput -- ^ __t__: A 4D input Tensor.
                                +                                              -> Tensor v'2 Float -- ^ __t_min__: The value represented by the lowest quantized input.
                                +                                              -> Tensor v'3 Float -- ^ __t_max__: The value represented by the highest quantized input.
                                +                                              -> Tensor v'4 tinput -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
                                +                                                                   -- This is the first output from tf.nn.moments,
                                +                                                                   -- or a saved moving average thereof.
                                +                                              -> Tensor v'5 Float -- ^ __m_min__: The value represented by the lowest quantized mean.
                                +                                              -> Tensor v'6 Float -- ^ __m_max__: The value represented by the highest quantized mean.
                                +                                              -> Tensor v'7 tinput -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
                                +                                                                   -- This is the second output from tf.nn.moments,
                                +                                                                   -- or a saved moving average thereof.
                                +                                              -> Tensor v'8 Float -- ^ __v_min__: The value represented by the lowest quantized variance.
                                +                                              -> Tensor v'9 Float -- ^ __v_max__: The value represented by the highest quantized variance.
                                +                                              -> Tensor v'10 tinput -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.
                                +                                                                    -- An offset to be added to the normalized tensor.
                                +                                              -> Tensor v'11 Float -- ^ __beta_min__: The value represented by the lowest quantized offset.
                                +                                              -> Tensor v'12 Float -- ^ __beta_max__: The value represented by the highest quantized offset.
                                +                                              -> Tensor v'13 tinput -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
                                +                                                                    -- If "scale_after_normalization" is true, this tensor will be multiplied
                                +                                                                    -- with the normalized tensor.
                                +                                              -> Tensor v'14 Float -- ^ __gamma_min__: The value represented by the lowest quantized gamma.
                                +                                              -> Tensor v'15 Float -- ^ __gamma_max__: The value represented by the highest quantized gamma.
                                +                                              -> (Tensor Build out_type,
                                +                                                  Tensor Build Float,
                                +                                                  Tensor Build Float)
                                +                                              -- ^ (__result__, __result_min__, __result_max__)
                                +                                              --
                                +                                              -- * __result__
                                +                                              --
                                +                                              -- * __result_min__
                                +                                              --
                                +                                              -- * __result_max__
                                +quantizedBatchNormWithGlobalNormalization' op'options scale_after_normalization
                                +                                           variance_epsilon t t_min t_max m
                                +                                           m_min m_max v v_min v_max beta
                                +                                           beta_min beta_max gamma gamma_min
                                +                                           gamma_max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t,
                                +                                                             buildInputs t_min,
                                +                                                             buildInputs t_max,
                                +                                                             buildInputs m,
                                +                                                             buildInputs m_min,
                                +                                                             buildInputs m_max,
                                +                                                             buildInputs v,
                                +                                                             buildInputs v_min,
                                +                                                             buildInputs v_max,
                                +                                                             buildInputs beta,
                                +                                                             buildInputs beta_min,
                                +                                                             buildInputs beta_max,
                                +                                                             buildInputs gamma,
                                +                                                             buildInputs gamma_min,
                                +                                                             buildInputs gamma_max]
                                +        return (opDef "QuantizedBatchNormWithGlobalNormalization"
                                +                & opAttr "Tinput" .~ tensorType (undefined :: tinput)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & opAttr "scale_after_normalization" .~ scale_after_normalization
                                +                & opAttr "variance_epsilon" .~ variance_epsilon
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "t" description: "A 4D input Tensor." type_attr: "Tinput"
                                +}
                                +input_arg {
                                +  name: "t_min"
                                +  description: "The value represented by the lowest quantized input."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "t_max"
                                +  description: "The value represented by the highest quantized input."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "m"
                                +  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
                                +  type_attr: "Tinput"
                                +}
                                +input_arg {
                                +  name: "m_min"
                                +  description: "The value represented by the lowest quantized mean."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "m_max"
                                +  description: "The value represented by the highest quantized mean."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "v"
                                +  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
                                +  type_attr: "Tinput"
                                +}
                                +input_arg {
                                +  name: "v_min"
                                +  description: "The value represented by the lowest quantized variance."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "v_max"
                                +  description: "The value represented by the highest quantized variance."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "beta"
                                +  description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor."
                                +  type_attr: "Tinput"
                                +}
                                +input_arg {
                                +  name: "beta_min"
                                +  description: "The value represented by the lowest quantized offset."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "beta_max"
                                +  description: "The value represented by the highest quantized offset."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "gamma"
                                +  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor."
                                +  type_attr: "Tinput"
                                +}
                                +input_arg {
                                +  name: "gamma_min"
                                +  description: "The value represented by the lowest quantized gamma."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "gamma_max"
                                +  description: "The value represented by the highest quantized gamma."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "result" type_attr: "out_type" }
                                +output_arg { name: "result_min" type: DT_FLOAT }
                                +output_arg { name: "result_max" type: DT_FLOAT }
                                +attr {
                                +  name: "Tinput"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "variance_epsilon"
                                +  type: "float"
                                +  description: "A small float number to avoid dividing by 0."
                                +}
                                +attr {
                                +  name: "scale_after_normalization"
                                +  type: "bool"
                                +  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
                                +}
                                +-}
                                +
                                +-- | Adds Tensor 'bias' to Tensor 'input' for Quantized types.
                                +--
                                +-- Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
                                +quantizedBiasAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2
                                +                    out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Word.Word16, Data.Word.Word8] t1,
                                +                                OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Word.Word16, Data.Word.Word8] t2,
                                +                                OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Word.Word16,
                                +                                        Data.Word.Word8] out_type) => 
                                +                    Tensor v'1 t1 -- ^ __input__
                                +                    -> Tensor v'2 t2 -- ^ __bias__: A 1D bias Tensor with size matching the last dimension of 'input'.
                                +                    -> Tensor v'3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
                                +                    -> Tensor v'4 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
                                +                    -> Tensor v'5 Float -- ^ __min_bias__: The float value that the lowest quantized bias value represents.
                                +                    -> Tensor v'6 Float -- ^ __max_bias__: The float value that the highest quantized bias value represents.
                                +                    -> (Tensor Build out_type, Tensor Build Float,
                                +                        Tensor Build Float)
                                +                    -- ^ (__output__, __min_out__, __max_out__)
                                +                    --
                                +                    -- * __output__
                                +                    --
                                +                    -- * __min_out__: The float value that the lowest quantized output value represents.
                                +                    --
                                +                    -- * __max_out__: The float value that the highest quantized output value represents.
                                +quantizedBiasAdd = quantizedBiasAdd' id
                                +quantizedBiasAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2
                                +                     out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Word.Word16, Data.Word.Word8] t1,
                                +                                 OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Word.Word16, Data.Word.Word8] t2,
                                +                                 OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Word.Word16,
                                +                                         Data.Word.Word8] out_type) =>
                                +                     OpParams ->
                                +                     Tensor v'1 t1 -- ^ __input__
                                +                     -> Tensor v'2 t2 -- ^ __bias__: A 1D bias Tensor with size matching the last dimension of 'input'.
                                +                     -> Tensor v'3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
                                +                     -> Tensor v'4 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
                                +                     -> Tensor v'5 Float -- ^ __min_bias__: The float value that the lowest quantized bias value represents.
                                +                     -> Tensor v'6 Float -- ^ __max_bias__: The float value that the highest quantized bias value represents.
                                +                     -> (Tensor Build out_type, Tensor Build Float,
                                +                         Tensor Build Float)
                                +                     -- ^ (__output__, __min_out__, __max_out__)
                                +                     --
                                +                     -- * __output__
                                +                     --
                                +                     -- * __min_out__: The float value that the lowest quantized output value represents.
                                +                     --
                                +                     -- * __max_out__: The float value that the highest quantized output value represents.
                                +quantizedBiasAdd' op'options input bias min_input max_input min_bias
                                +                  max_bias | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs bias,
                                +                                                             buildInputs min_input,
                                +                                                             buildInputs max_input,
                                +                                                             buildInputs min_bias,
                                +                                                             buildInputs max_bias]
                                +        return (opDef "QuantizedBiasAdd"
                                +                & opAttr "T1" .~ tensorType (undefined :: t1)
                                +                & opAttr "T2" .~ tensorType (undefined :: t2)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T1" }
                                +input_arg {
                                +  name: "bias"
                                +  description: "A 1D bias Tensor with size matching the last dimension of \'input\'."
                                +  type_attr: "T2"
                                +}
                                +input_arg {
                                +  name: "min_input"
                                +  description: "The float value that the lowest quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_input"
                                +  description: "The float value that the highest quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "min_bias"
                                +  description: "The float value that the lowest quantized bias value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_bias"
                                +  description: "The float value that the highest quantized bias value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "output" type_attr: "out_type" }
                                +output_arg {
                                +  name: "min_out"
                                +  description: "The float value that the lowest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_out"
                                +  description: "The float value that the highest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T1"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "T2"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Concatenates quantized tensors along one dimension.
                                +
                                +quantizedConcat :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => 
                                +                   Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the
                                +                                             -- range [0, rank(values)).
                                +                   -> [Tensor v'2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,
                                +                                     -- and their sizes must match in all dimensions except `concat_dim`.
                                +                   -> [Tensor v'3 Float] -- ^ __input_mins__: The minimum scalar values for each of the input tensors.
                                +                   -> [Tensor v'4 Float] -- ^ __input_maxes__: The maximum scalar values for each of the input tensors.
                                +                   -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +                   -- ^ (__output__, __output_min__, __output_max__)
                                +                   --
                                +                   -- * __output__: A `Tensor` with the concatenation of values stacked along the
                                +                   -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
                                +                   -- in `concat_dim` where it has the sum of the sizes.
                                +                   --
                                +                   -- * __output_min__: The float value that the minimum quantized output value represents.
                                +                   --
                                +                   -- * __output_max__: The float value that the maximum quantized output value represents.
                                +quantizedConcat = quantizedConcat' id
                                +quantizedConcat' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams ->
                                +                    Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the
                                +                                              -- range [0, rank(values)).
                                +                    -> [Tensor v'2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,
                                +                                      -- and their sizes must match in all dimensions except `concat_dim`.
                                +                    -> [Tensor v'3 Float] -- ^ __input_mins__: The minimum scalar values for each of the input tensors.
                                +                    -> [Tensor v'4 Float] -- ^ __input_maxes__: The maximum scalar values for each of the input tensors.
                                +                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +                    -- ^ (__output__, __output_min__, __output_max__)
                                +                    --
                                +                    -- * __output__: A `Tensor` with the concatenation of values stacked along the
                                +                    -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
                                +                    -- in `concat_dim` where it has the sum of the sizes.
                                +                    --
                                +                    -- * __output_min__: The float value that the minimum quantized output value represents.
                                +                    --
                                +                    -- * __output_max__: The float value that the maximum quantized output value represents.
                                +quantizedConcat' op'options concat_dim values input_mins
                                +                 input_maxes | eqLengthGuard [("N", [("values", length values),
                                +                                                     ("input_mins", length input_mins),
                                +                                                     ("input_maxes", length input_maxes)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim,
                                +                                                             buildInputs values,
                                +                                                             buildInputs input_mins,
                                +                                                             buildInputs input_maxes]
                                +        return (opDef "QuantizedConcat"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length values) :: Int64
                                +{-
                                +input_arg {
                                +  name: "concat_dim"
                                +  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +}
                                +input_arg {
                                +  name: "input_mins"
                                +  description: "The minimum scalar values for each of the input tensors."
                                +  type: DT_FLOAT
                                +  number_attr: "N"
                                +}
                                +input_arg {
                                +  name: "input_maxes"
                                +  description: "The maximum scalar values for each of the input tensors."
                                +  type: DT_FLOAT
                                +  number_attr: "N"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_min"
                                +  description: "The float value that the minimum quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output_max"
                                +  description: "The float value that the maximum quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 2 }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes a 2D convolution given quantized 4D input and filter tensors.
                                +--
                                +-- The inputs are quantized tensors where the lowest value represents the real
                                +-- number of the associated minimum, and the highest represents the maximum.
                                +-- This means that you can only interpret the quantized output in the same way, by
                                +-- taking the returned minimum and maximum values into account.
                                +quantizedConv2D :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter
                                +                   out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Word.Word16,
                                +                                       Data.Word.Word8] tinput,
                                +                               OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Word.Word16,
                                +                                       Data.Word.Word8] tfilter,
                                +                               OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Word.Word16,
                                +                                       Data.Word.Word8] out_type) => 
                                +                   Tensor v'1 tinput -- ^ __input__
                                +                   -> Tensor v'2 tfilter -- ^ __filter__: filter's input_depth dimension must match input's depth dimensions.
                                +                   -> Tensor v'3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
                                +                   -> Tensor v'4 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
                                +                   -> Tensor v'5 Float -- ^ __min_filter__: The float value that the lowest quantized filter value represents.
                                +                   -> Tensor v'6 Float -- ^ __max_filter__: The float value that the highest quantized filter value represents.
                                +                   -> (Tensor Build out_type, Tensor Build Float,
                                +                       Tensor Build Float)
                                +                   -- ^ (__output__, __min_output__, __max_output__)
                                +                   --
                                +                   -- * __output__
                                +                   --
                                +                   -- * __min_output__: The float value that the lowest quantized output value represents.
                                +                   --
                                +                   -- * __max_output__: The float value that the highest quantized output value represents.
                                +quantizedConv2D = quantizedConv2D' id
                                +quantizedConv2D' :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter
                                +                    out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Word.Word16,
                                +                                        Data.Word.Word8] tinput,
                                +                                OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Word.Word16,
                                +                                        Data.Word.Word8] tfilter,
                                +                                OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Word.Word16,
                                +                                        Data.Word.Word8] out_type) =>
                                +                    OpParams ->
                                +                    Tensor v'1 tinput -- ^ __input__
                                +                    -> Tensor v'2 tfilter -- ^ __filter__: filter's input_depth dimension must match input's depth dimensions.
                                +                    -> Tensor v'3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
                                +                    -> Tensor v'4 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
                                +                    -> Tensor v'5 Float -- ^ __min_filter__: The float value that the lowest quantized filter value represents.
                                +                    -> Tensor v'6 Float -- ^ __max_filter__: The float value that the highest quantized filter value represents.
                                +                    -> (Tensor Build out_type, Tensor Build Float,
                                +                        Tensor Build Float)
                                +                    -- ^ (__output__, __min_output__, __max_output__)
                                +                    --
                                +                    -- * __output__
                                +                    --
                                +                    -- * __min_output__: The float value that the lowest quantized output value represents.
                                +                    --
                                +                    -- * __max_output__: The float value that the highest quantized output value represents.
                                +quantizedConv2D' op'options input filter min_input max_input min_filter
                                +                 max_filter | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs filter,
                                +                                                             buildInputs min_input,
                                +                                                             buildInputs max_input,
                                +                                                             buildInputs min_filter,
                                +                                                             buildInputs max_filter]
                                +        return (opDef "QuantizedConv2D"
                                +                & opAttr "Tinput" .~ tensorType (undefined :: tinput)
                                +                & opAttr "Tfilter" .~ tensorType (undefined :: tfilter)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "Tinput" }
                                +input_arg {
                                +  name: "filter"
                                +  description: "filter\'s input_depth dimension must match input\'s depth dimensions."
                                +  type_attr: "Tfilter"
                                +}
                                +input_arg {
                                +  name: "min_input"
                                +  description: "The float value that the lowest quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_input"
                                +  description: "The float value that the highest quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "min_filter"
                                +  description: "The float value that the lowest quantized filter value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_filter"
                                +  description: "The float value that the highest quantized filter value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "output" type_attr: "out_type" }
                                +output_arg {
                                +  name: "min_output"
                                +  description: "The float value that the lowest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_output"
                                +  description: "The float value that the highest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "Tinput"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tfilter"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  default_value { type: DT_QINT32 }
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input\ntensor."
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Quantized Instance normalization.
                                +
                                +quantizedInstanceNorm :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                         Data.Int.Int32,
                                +                                                         Data.Word.Word16,
                                +                                                         Data.Word.Word8] t) => 
                                +                         Tensor v'1 t -- ^ __x__: A 4D input Tensor.
                                +                         -> Tensor v'2 Float -- ^ __x_min__: The value represented by the lowest quantized input.
                                +                         -> Tensor v'3 Float -- ^ __x_max__: The value represented by the highest quantized input.
                                +                         -> (Tensor Build t, Tensor Build Float,
                                +                             Tensor Build Float)
                                +                         -- ^ (__y__, __y_min__, __y_max__)
                                +                         --
                                +                         -- * __y__: A 4D Tensor.
                                +                         --
                                +                         -- * __y_min__: The value represented by the lowest quantized output.
                                +                         --
                                +                         -- * __y_max__: The value represented by the highest quantized output.
                                +quantizedInstanceNorm = quantizedInstanceNorm' id
                                +quantizedInstanceNorm' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                          Data.Int.Int32,
                                +                                                          Data.Word.Word16,
                                +                                                          Data.Word.Word8] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 t -- ^ __x__: A 4D input Tensor.
                                +                          -> Tensor v'2 Float -- ^ __x_min__: The value represented by the lowest quantized input.
                                +                          -> Tensor v'3 Float -- ^ __x_max__: The value represented by the highest quantized input.
                                +                          -> (Tensor Build t, Tensor Build Float,
                                +                              Tensor Build Float)
                                +                          -- ^ (__y__, __y_min__, __y_max__)
                                +                          --
                                +                          -- * __y__: A 4D Tensor.
                                +                          --
                                +                          -- * __y_min__: The value represented by the lowest quantized output.
                                +                          --
                                +                          -- * __y_max__: The value represented by the highest quantized output.
                                +quantizedInstanceNorm' op'options x x_min x_max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs x_min,
                                +                                                             buildInputs x_max]
                                +        return (opDef "QuantizedInstanceNorm"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "x" description: "A 4D input Tensor." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "x_min"
                                +  description: "The value represented by the lowest quantized input."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "x_max"
                                +  description: "The value represented by the highest quantized input."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "y" description: "A 4D Tensor." type_attr: "T" }
                                +output_arg {
                                +  name: "y_min"
                                +  description: "The value represented by the lowest quantized output."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "y_max"
                                +  description: "The value represented by the highest quantized output."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "output_range_given"
                                +  type: "bool"
                                +  default_value { b: false }
                                 +  description: "If True, `given_y_min`\nand `given_y_max` are used as the output range. Otherwise,\nthe implementation computes the output range."
                                +}
                                +attr {
                                +  name: "given_y_min"
                                +  type: "float"
                                +  default_value { f: 0.0 }
                                +  description: "Output in `y_min` if `output_range_given` is True."
                                +}
                                +attr {
                                +  name: "given_y_max"
                                +  type: "float"
                                +  default_value { f: 0.0 }
                                +  description: "Output in `y_max` if `output_range_given` is True."
                                +}
                                +attr {
                                +  name: "variance_epsilon"
                                +  type: "float"
                                +  default_value { f: 1.0e-5 }
                                +  description: "A small float number to avoid dividing by 0."
                                +}
                                +attr {
                                +  name: "min_separation"
                                +  type: "float"
                                +  default_value { f: 1.0e-3 }
                                +  description: "Minimum value of `y_max - y_min`"
                                +}
                                +-}
                                +
                                 +-- | Perform a quantized matrix multiplication of `a` by the matrix `b`.
                                +--
                                +-- The inputs must be two-dimensional matrices and the inner dimension of
                                +-- `a` (after being transposed if `transpose_a` is non-zero) must match the
                                +-- outer dimension of `b` (after being transposed if `transposed_b` is
                                +-- non-zero).
                                +quantizedMatMul :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2
                                +                   toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Word.Word16, Data.Word.Word8] t1,
                                +                              OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Word.Word16, Data.Word.Word8] t2,
                                +                              OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Word.Word16,
                                +                                      Data.Word.Word8] toutput) => 
                                +                   Tensor v'1 t1 -- ^ __a__: Must be a two-dimensional tensor.
                                +                   -> Tensor v'2 t2 -- ^ __b__: Must be a two-dimensional tensor.
                                +                   -> Tensor v'3 Float -- ^ __min_a__: The float value that the lowest quantized `a` value represents.
                                +                   -> Tensor v'4 Float -- ^ __max_a__: The float value that the highest quantized `a` value represents.
                                +                   -> Tensor v'5 Float -- ^ __min_b__: The float value that the lowest quantized `b` value represents.
                                +                   -> Tensor v'6 Float -- ^ __max_b__: The float value that the highest quantized `b` value represents.
                                +                   -> (Tensor Build toutput, Tensor Build Float,
                                +                       Tensor Build Float)
                                +                   -- ^ (__out__, __min_out__, __max_out__)
                                +                   --
                                +                   -- * __out__
                                +                   --
                                +                   -- * __min_out__: The float value that the lowest quantized output value represents.
                                +                   --
                                +                   -- * __max_out__: The float value that the highest quantized output value represents.
                                +quantizedMatMul = quantizedMatMul' id
                                +quantizedMatMul' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2
                                +                    toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Word.Word16, Data.Word.Word8] t1,
                                +                               OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Word.Word16, Data.Word.Word8] t2,
                                +                               OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Word.Word16,
                                +                                       Data.Word.Word8] toutput) => OpParams ->
                                +                    Tensor v'1 t1 -- ^ __a__: Must be a two-dimensional tensor.
                                +                    -> Tensor v'2 t2 -- ^ __b__: Must be a two-dimensional tensor.
                                +                    -> Tensor v'3 Float -- ^ __min_a__: The float value that the lowest quantized `a` value represents.
                                +                    -> Tensor v'4 Float -- ^ __max_a__: The float value that the highest quantized `a` value represents.
                                +                    -> Tensor v'5 Float -- ^ __min_b__: The float value that the lowest quantized `b` value represents.
                                +                    -> Tensor v'6 Float -- ^ __max_b__: The float value that the highest quantized `b` value represents.
                                +                    -> (Tensor Build toutput, Tensor Build Float,
                                +                        Tensor Build Float)
                                +                    -- ^ (__out__, __min_out__, __max_out__)
                                +                    --
                                +                    -- * __out__
                                +                    --
                                +                    -- * __min_out__: The float value that the lowest quantized output value represents.
                                +                    --
                                +                    -- * __max_out__: The float value that the highest quantized output value represents.
                                +quantizedMatMul' op'options a b min_a max_a min_b max_b | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a,
                                +                                                             buildInputs b,
                                +                                                             buildInputs min_a,
                                +                                                             buildInputs max_a,
                                +                                                             buildInputs min_b,
                                +                                                             buildInputs max_b]
                                +        return (opDef "QuantizedMatMul"
                                +                & opAttr "T1" .~ tensorType (undefined :: t1)
                                +                & opAttr "T2" .~ tensorType (undefined :: t2)
                                +                & opAttr "Toutput" .~ tensorType (undefined :: toutput)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "a"
                                +  description: "Must be a two-dimensional tensor."
                                +  type_attr: "T1"
                                +}
                                +input_arg {
                                +  name: "b"
                                +  description: "Must be a two-dimensional tensor."
                                +  type_attr: "T2"
                                +}
                                +input_arg {
                                +  name: "min_a"
                                +  description: "The float value that the lowest quantized `a` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_a"
                                +  description: "The float value that the highest quantized `a` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "min_b"
                                +  description: "The float value that the lowest quantized `b` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_b"
                                +  description: "The float value that the highest quantized `b` value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "out" type_attr: "Toutput" }
                                +output_arg {
                                +  name: "min_out"
                                +  description: "The float value that the lowest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_out"
                                +  description: "The float value that the highest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T1"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "T2"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Toutput"
                                +  type: "type"
                                +  default_value { type: DT_QINT32 }
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "transpose_a"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, `a` is transposed before multiplication."
                                +}
                                +attr {
                                +  name: "transpose_b"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, `b` is transposed before multiplication."
                                +}
                                +attr {
                                +  name: "Tactivation"
                                +  type: "type"
                                +  default_value { type: DT_QUINT8 }
                                +  description: "The type of output produced by activation function\nfollowing this operation."
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Produces the max pool of the input tensor for quantized types.
                                +
                                +quantizedMaxPool :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8] t) => 
                                +                    Tensor v'1 t -- ^ __input__: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
                                +                    -> Tensor v'2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
                                +                    -> Tensor v'3 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
                                +                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +                    -- ^ (__output__, __min_output__, __max_output__)
                                +                    --
                                +                    -- * __output__
                                +                    --
                                +                    -- * __min_output__: The float value that the lowest quantized output value represents.
                                +                    --
                                +                    -- * __max_output__: The float value that the highest quantized output value represents.
                                +quantizedMaxPool = quantizedMaxPool' id
                                +quantizedMaxPool' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8] t) =>
                                +                     OpParams ->
                                +                     Tensor v'1 t -- ^ __input__: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
                                +                     -> Tensor v'2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
                                +                     -> Tensor v'3 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
                                +                     -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +                     -- ^ (__output__, __min_output__, __max_output__)
                                +                     --
                                +                     -- * __output__
                                +                     --
                                +                     -- * __min_output__: The float value that the lowest quantized output value represents.
                                +                     --
                                +                     -- * __max_output__: The float value that the highest quantized output value represents.
                                +quantizedMaxPool' op'options input min_input max_input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs min_input,
                                +                                                             buildInputs max_input]
                                +        return (opDef "QuantizedMaxPool"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "The 4D (batch x rows x cols x depth) Tensor to MaxReduce over."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "min_input"
                                +  description: "The float value that the lowest quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_input"
                                +  description: "The float value that the highest quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +output_arg {
                                +  name: "min_output"
                                +  description: "The float value that the lowest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_output"
                                +  description: "The float value that the highest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "ksize"
                                +  type: "list(int)"
                                +  description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input."
                                +}
                                +attr {
                                +  name: "strides"
                                +  type: "list(int)"
                                +  description: "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input."
                                +}
                                +attr {
                                +  name: "padding"
                                +  type: "string"
                                +  description: "The type of padding algorithm to use."
                                +  allowed_values { list { s: "SAME" s: "VALID" } }
                                +}
                                +-}
                                +
                                +-- | Returns x * y element-wise, working on quantized buffers.
                                +
                                +quantizedMul :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2
                                +                toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Word.Word16, Data.Word.Word8] t1,
                                +                           OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Word.Word16, Data.Word.Word8] t2,
                                +                           OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Word.Word16,
                                +                                   Data.Word.Word8] toutput) => 
                                +                Tensor v'1 t1 -- ^ __x__
                                +                -> Tensor v'2 t2 -- ^ __y__
                                +                -> Tensor v'3 Float -- ^ __min_x__: The float value that the lowest quantized `x` value represents.
                                +                -> Tensor v'4 Float -- ^ __max_x__: The float value that the highest quantized `x` value represents.
                                +                -> Tensor v'5 Float -- ^ __min_y__: The float value that the lowest quantized `y` value represents.
                                +                -> Tensor v'6 Float -- ^ __max_y__: The float value that the highest quantized `y` value represents.
                                +                -> (Tensor Build toutput, Tensor Build Float,
                                +                    Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__)
                                +                --
                                +                -- * __z__
                                +                --
                                +                -- * __min_z__: The float value that the lowest quantized output value represents.
                                +                --
                                +                -- * __max_z__: The float value that the highest quantized output value represents.
                                +                -- 
                                +                -- *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
                                +                -- broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +quantizedMul = quantizedMul' id
                                +quantizedMul' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2
                                +                 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Word.Word16, Data.Word.Word8] t1,
                                +                            OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Word.Word16, Data.Word.Word8] t2,
                                +                            OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Word.Word16,
                                +                                    Data.Word.Word8] toutput) => OpParams ->
                                +                 Tensor v'1 t1 -- ^ __x__
                                +                 -> Tensor v'2 t2 -- ^ __y__
                                +                 -> Tensor v'3 Float -- ^ __min_x__: The float value that the lowest quantized `x` value represents.
                                +                 -> Tensor v'4 Float -- ^ __max_x__: The float value that the highest quantized `x` value represents.
                                +                 -> Tensor v'5 Float -- ^ __min_y__: The float value that the lowest quantized `y` value represents.
                                +                 -> Tensor v'6 Float -- ^ __max_y__: The float value that the highest quantized `y` value represents.
                                +                 -> (Tensor Build toutput, Tensor Build Float,
                                +                     Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__)
                                +                 --
                                +                 -- * __z__
                                +                 --
                                +                 -- * __min_z__: The float value that the lowest quantized output value represents.
                                +                 --
                                +                 -- * __max_z__: The float value that the highest quantized output value represents.
                                +                 -- 
                                +                 -- *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
                                +                 -- broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +quantizedMul' op'options x y min_x max_x min_y max_y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y,
                                +                                                             buildInputs min_x,
                                +                                                             buildInputs max_x,
                                +                                                             buildInputs min_y,
                                +                                                             buildInputs max_y]
                                +        return (opDef "QuantizedMul"
                                +                & opAttr "T1" .~ tensorType (undefined :: t1)
                                +                & opAttr "T2" .~ tensorType (undefined :: t2)
                                +                & opAttr "Toutput" .~ tensorType (undefined :: toutput)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T1" }
                                +input_arg { name: "y" type_attr: "T2" }
                                +input_arg {
                                +  name: "min_x"
                                +  description: "The float value that the lowest quantized `x` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_x"
                                +  description: "The float value that the highest quantized `x` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "min_y"
                                +  description: "The float value that the lowest quantized `y` value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_y"
                                +  description: "The float value that the highest quantized `y` value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "z" type_attr: "Toutput" }
                                +output_arg {
                                +  name: "min_z"
                                +  description: "The float value that the lowest quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_z"
                                +  description: "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)"
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T1"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "T2"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Toutput"
                                +  type: "type"
                                +  default_value { type: DT_QINT32 }
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes Quantized Rectified Linear: `max(features, 0)`
                                +
                                +quantizedRelu :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16,
                                +                                                               Data.Int.Int32,
                                +                                                               Data.Word.Word16,
                                +                                                               Data.Word.Word8] tinput,
                                +                                                       OneOf '[Data.Int.Int16,
                                +                                                               Data.Int.Int32,
                                +                                                               Data.Word.Word16,
                                +                                                               Data.Word.Word8] out_type) =>
                                +                 
                                +                 Tensor v'1 tinput -- ^ __features__
                                +                 -> Tensor v'2 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
                                +                 -> Tensor v'3 Float -- ^ __max_features__: The float value that the highest quantized value represents.
                                +                 -> (Tensor Build out_type, Tensor Build Float,
                                +                     Tensor Build Float)
                                +                 -- ^ (__activations__, __min_activations__, __max_activations__)
                                +                 --
                                +                 -- * __activations__: Has the same output shape as "features".
                                +                 --
                                +                 -- * __min_activations__: The float value that the lowest quantized value represents.
                                +                 --
                                +                 -- * __max_activations__: The float value that the highest quantized value represents.
                                +quantizedRelu = quantizedRelu' id
                                +quantizedRelu' :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16,
                                +                                                                Data.Int.Int32,
                                +                                                                Data.Word.Word16,
                                +                                                                Data.Word.Word8] tinput,
                                +                                                        OneOf '[Data.Int.Int16,
                                +                                                                Data.Int.Int32,
                                +                                                                Data.Word.Word16,
                                +                                                                Data.Word.Word8] out_type) =>
                                +                  OpParams ->
                                +                  Tensor v'1 tinput -- ^ __features__
                                +                  -> Tensor v'2 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
                                +                  -> Tensor v'3 Float -- ^ __max_features__: The float value that the highest quantized value represents.
                                +                  -> (Tensor Build out_type, Tensor Build Float,
                                +                      Tensor Build Float)
                                +                  -- ^ (__activations__, __min_activations__, __max_activations__)
                                +                  --
                                +                  -- * __activations__: Has the same output shape as "features".
                                +                  --
                                +                  -- * __min_activations__: The float value that the lowest quantized value represents.
                                +                  --
                                +                  -- * __max_activations__: The float value that the highest quantized value represents.
                                +quantizedRelu' op'options features min_features
                                +               max_features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features,
                                +                                                             buildInputs min_features,
                                +                                                             buildInputs max_features]
                                +        return (opDef "QuantizedRelu"
                                +                & opAttr "Tinput" .~ tensorType (undefined :: tinput)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "features" type_attr: "Tinput" }
                                +input_arg {
                                +  name: "min_features"
                                +  description: "The float value that the lowest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_features"
                                +  description: "The float value that the highest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "activations"
                                +  description: "Has the same output shape as \"features\"."
                                +  type_attr: "out_type"
                                +}
                                +output_arg {
                                +  name: "min_activations"
                                +  description: "The float value that the lowest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_activations"
                                +  description: "The float value that the highest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "Tinput"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  default_value { type: DT_QUINT8 }
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
                                +
                                +quantizedRelu6 :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16,
                                +                                                                Data.Int.Int32,
                                +                                                                Data.Word.Word16,
                                +                                                                Data.Word.Word8] tinput,
                                +                                                        OneOf '[Data.Int.Int16,
                                +                                                                Data.Int.Int32,
                                +                                                                Data.Word.Word16,
                                +                                                                Data.Word.Word8] out_type) =>
                                +                  
                                +                  Tensor v'1 tinput -- ^ __features__
                                +                  -> Tensor v'2 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
                                +                  -> Tensor v'3 Float -- ^ __max_features__: The float value that the highest quantized value represents.
                                +                  -> (Tensor Build out_type, Tensor Build Float,
                                +                      Tensor Build Float)
                                +                  -- ^ (__activations__, __min_activations__, __max_activations__)
                                +                  --
                                +                  -- * __activations__: Has the same output shape as "features".
                                +                  --
                                +                  -- * __min_activations__: The float value that the lowest quantized value represents.
                                +                  --
                                +                  -- * __max_activations__: The float value that the highest quantized value represents.
                                +quantizedRelu6 = quantizedRelu6' id
                                +quantizedRelu6' :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16,
                                +                                                                 Data.Int.Int32,
                                +                                                                 Data.Word.Word16,
                                +                                                                 Data.Word.Word8] tinput,
                                +                                                         OneOf '[Data.Int.Int16,
                                +                                                                 Data.Int.Int32,
                                +                                                                 Data.Word.Word16,
                                +                                                                 Data.Word.Word8] out_type) =>
                                +                   OpParams ->
                                +                   Tensor v'1 tinput -- ^ __features__
                                +                   -> Tensor v'2 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
                                +                   -> Tensor v'3 Float -- ^ __max_features__: The float value that the highest quantized value represents.
                                +                   -> (Tensor Build out_type, Tensor Build Float,
                                +                       Tensor Build Float)
                                +                   -- ^ (__activations__, __min_activations__, __max_activations__)
                                +                   --
                                +                   -- * __activations__: Has the same output shape as "features".
                                +                   --
                                +                   -- * __min_activations__: The float value that the lowest quantized value represents.
                                +                   --
                                +                   -- * __max_activations__: The float value that the highest quantized value represents.
                                +quantizedRelu6' op'options features min_features
                                +                max_features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features,
                                +                                                             buildInputs min_features,
                                +                                                             buildInputs max_features]
                                +        return (opDef "QuantizedRelu6"
                                +                & opAttr "Tinput" .~ tensorType (undefined :: tinput)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "features" type_attr: "Tinput" }
                                +input_arg {
                                +  name: "min_features"
                                +  description: "The float value that the lowest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_features"
                                +  description: "The float value that the highest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "activations"
                                +  description: "Has the same output shape as \"features\"."
                                +  type_attr: "out_type"
                                +}
                                +output_arg {
                                +  name: "min_activations"
                                +  description: "The float value that the lowest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_activations"
                                +  description: "The float value that the highest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "Tinput"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  default_value { type: DT_QUINT8 }
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
                                +
                                +quantizedReluX :: forall v'1 v'2 v'3 v'4 tinput
                                +                  out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Word.Word16, Data.Word.Word8] tinput,
                                +                              OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Word.Word16,
                                +                                      Data.Word.Word8] out_type) => 
                                +                  Tensor v'1 tinput -- ^ __features__
                                +                  -> Tensor v'2 Float -- ^ __max_value__
                                +                  -> Tensor v'3 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
                                +                  -> Tensor v'4 Float -- ^ __max_features__: The float value that the highest quantized value represents.
                                +                  -> (Tensor Build out_type, Tensor Build Float,
                                +                      Tensor Build Float)
                                +                  -- ^ (__activations__, __min_activations__, __max_activations__)
                                +                  --
                                +                  -- * __activations__: Has the same output shape as "features".
                                +                  --
                                +                  -- * __min_activations__: The float value that the lowest quantized value represents.
                                +                  --
                                +                  -- * __max_activations__: The float value that the highest quantized value represents.
                                +quantizedReluX = quantizedReluX' id
                                +quantizedReluX' :: forall v'1 v'2 v'3 v'4 tinput
                                +                   out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Word.Word16,
                                +                                       Data.Word.Word8] tinput,
                                +                               OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Word.Word16,
                                +                                       Data.Word.Word8] out_type) => OpParams ->
                                +                   Tensor v'1 tinput -- ^ __features__
                                +                   -> Tensor v'2 Float -- ^ __max_value__
                                +                   -> Tensor v'3 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
                                +                   -> Tensor v'4 Float -- ^ __max_features__: The float value that the highest quantized value represents.
                                +                   -> (Tensor Build out_type, Tensor Build Float,
                                +                       Tensor Build Float)
                                +                   -- ^ (__activations__, __min_activations__, __max_activations__)
                                +                   --
                                +                   -- * __activations__: Has the same output shape as "features".
                                +                   --
                                +                   -- * __min_activations__: The float value that the lowest quantized value represents.
                                +                   --
                                +                   -- * __max_activations__: The float value that the highest quantized value represents.
                                +quantizedReluX' op'options features max_value min_features
                                +                max_features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features,
                                +                                                             buildInputs max_value,
                                +                                                             buildInputs min_features,
                                +                                                             buildInputs max_features]
                                +        return (opDef "QuantizedReluX"
                                +                & opAttr "Tinput" .~ tensorType (undefined :: tinput)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "features" type_attr: "Tinput" }
                                +input_arg { name: "max_value" type: DT_FLOAT }
                                +input_arg {
                                +  name: "min_features"
                                +  description: "The float value that the lowest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "max_features"
                                +  description: "The float value that the highest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "activations"
                                +  description: "Has the same output shape as \"features\"."
                                +  type_attr: "out_type"
                                +}
                                +output_arg {
                                +  name: "min_activations"
                                +  description: "The float value that the lowest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "max_activations"
                                +  description: "The float value that the highest quantized value represents."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "Tinput"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  default_value { type: DT_QUINT8 }
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Reshapes a quantized tensor as per the Reshape op.
                                +--
                                +-- ```
                                +quantizedReshape :: forall v'1 v'2 v'3 v'4 t tshape . (TensorType t,
                                +                                                       OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64] tshape) =>
                                +                    
                                +                    Tensor v'1 t -- ^ __tensor__
                                +                    -> Tensor v'2 tshape -- ^ __shape__: Defines the shape of the output tensor.
                                +                    -> Tensor v'3 Float -- ^ __input_min__: The minimum value of the input.
                                +                    -> Tensor v'4 Float -- ^ __input_max__: The maximum value of the input.
                                +                    -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +                    -- ^ (__output__, __output_min__, __output_max__)
                                +                    --
                                +                    -- * __output__
                                +                    --
                                +                    -- * __output_min__: This value is copied from input_min.
                                +                    --
                                +                    -- * __output_max__: This value is copied from input_max.
                                +quantizedReshape = quantizedReshape' id
                                +quantizedReshape' :: forall v'1 v'2 v'3 v'4 t tshape . (TensorType t,
                                +                                                        OneOf '[Data.Int.Int32,
                                +                                                                Data.Int.Int64] tshape) =>
                                +                     OpParams ->
                                +                     Tensor v'1 t -- ^ __tensor__
                                +                     -> Tensor v'2 tshape -- ^ __shape__: Defines the shape of the output tensor.
                                +                     -> Tensor v'3 Float -- ^ __input_min__: The minimum value of the input.
                                +                     -> Tensor v'4 Float -- ^ __input_max__: The maximum value of the input.
                                +                     -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
                                +                     -- ^ (__output__, __output_min__, __output_max__)
                                +                     --
                                +                     -- * __output__
                                +                     --
                                +                     -- * __output_min__: This value is copied from input_min.
                                +                     --
                                +                     -- * __output_max__: This value is copied from input_max.
                                +quantizedReshape' op'options tensor shape input_min
                                +                  input_max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor,
                                +                                                             buildInputs shape,
                                +                                                             buildInputs input_min,
                                +                                                             buildInputs input_max]
                                +        return (opDef "QuantizedReshape"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tshape" .~ tensorType (undefined :: tshape)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "tensor" type_attr: "T" }
                                +input_arg {
                                +  name: "shape"
                                +  description: "Defines the shape of the output tensor."
                                +  type_attr: "Tshape"
                                +}
                                +input_arg {
                                +  name: "input_min"
                                +  description: "The minimum value of the input."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "input_max"
                                +  description: "The maximum value of the input."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +output_arg {
                                +  name: "output_min"
                                +  description: "This value is copied from input_min."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output_max"
                                +  description: "This value is copied from input_max."
                                +  type: DT_FLOAT
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tshape"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Resize quantized `images` to `size` using quantized bilinear interpolation.
                                +--
                                +-- Input images and output images must be quantized types.
                                +quantizedResizeBilinear :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32,
                                +                                                               Data.Word.Word8,
                                +                                                               Float] t) => 
                                +                           Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +                           -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                                        -- new size for the images.
                                +                           -> Tensor v'3 Float -- ^ __min__
                                +                           -> Tensor v'4 Float -- ^ __max__
                                +                           -> (Tensor Build t, Tensor Build Float,
                                +                               Tensor Build Float)
                                +                           -- ^ (__resized_images__, __out_min__, __out_max__)
                                +                           --
                                +                           -- * __resized_images__: 4-D with shape
                                +                           -- `[batch, new_height, new_width, channels]`.
                                +                           --
                                +                           -- * __out_min__
                                +                           --
                                +                           -- * __out_max__
                                +quantizedResizeBilinear = quantizedResizeBilinear' id
                                +quantizedResizeBilinear' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32,
                                +                                                                Data.Word.Word8,
                                +                                                                Float] t) =>
                                +                            OpParams ->
                                +                            Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +                            -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                                         -- new size for the images.
                                +                            -> Tensor v'3 Float -- ^ __min__
                                +                            -> Tensor v'4 Float -- ^ __max__
                                +                            -> (Tensor Build t, Tensor Build Float,
                                +                                Tensor Build Float)
                                +                            -- ^ (__resized_images__, __out_min__, __out_max__)
                                +                            --
                                +                            -- * __resized_images__: 4-D with shape
                                +                            -- `[batch, new_height, new_width, channels]`.
                                +                            --
                                +                            -- * __out_min__
                                +                            --
                                +                            -- * __out_max__
                                +quantizedResizeBilinear' op'options images size min max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs size,
                                +                                                             buildInputs min,
                                +                                                             buildInputs max]
                                +        return (opDef "QuantizedResizeBilinear"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
                                +  type: DT_INT32
                                +}
                                +input_arg { name: "min" type: DT_FLOAT }
                                +input_arg { name: "max" type: DT_FLOAT }
                                +output_arg {
                                +  name: "resized_images"
                                +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "out_min" type: DT_FLOAT }
                                +output_arg { name: "out_max" type: DT_FLOAT }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_QUINT8 type: DT_QINT32 type: DT_FLOAT }
                                +  }
                                +}
                                +attr {
                                +  name: "align_corners"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
                                +}
                                +-}
                                +
                                +-- | Closes the given queue.
                                +--
                                +-- This operation signals that no more elements will be enqueued in the
                                +-- given queue. Subsequent Enqueue(Many) operations will fail.
                                +-- Subsequent Dequeue(Many) operations will continue to succeed if
                                +-- sufficient elements remain in the queue. Subsequent Dequeue(Many)
                                +-- operations that would block will fail immediately.
                                +queueClose :: forall m' . (MonadBuild m') => 
                                +              Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +              -> m' (ControlNode)
                                +queueClose = queueClose' id
                                +queueClose' :: forall m' . (MonadBuild m') => OpParams ->
                                +               Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +               -> m' (ControlNode)
                                +queueClose' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "QueueClose"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "cancel_pending_enqueues"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, all pending enqueue requests that are\nblocked on the given queue will be canceled."
                                +}
                                +-}
                                +
                                +-- | Closes the given queue.
                                +--
                                +-- This operation signals that no more elements will be enqueued in the
                                +-- given queue. Subsequent Enqueue(Many) operations will fail.
                                +-- Subsequent Dequeue(Many) operations will continue to succeed if
                                +-- sufficient elements remain in the queue. Subsequent Dequeue(Many)
                                +-- operations that would block will fail immediately.
                                +queueCloseV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                -> m' (ControlNode)
                                +queueCloseV2 = queueCloseV2' id
                                +queueCloseV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                 Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                 -> m' (ControlNode)
                                +queueCloseV2' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "QueueCloseV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "cancel_pending_enqueues"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, all pending enqueue requests that are\nblocked on the given queue will be canceled."
                                +}
                                +-}
                                +
                                +-- | Dequeues a tuple of one or more tensors from the given queue.
                                +--
                                +-- This operation has k outputs, where k is the number of components
                                +-- in the tuples stored in the given queue, and output i is the ith
                                +-- component of the dequeued tuple.
                                +-- 
                                +-- N.B. If the queue is empty, this operation will block until an element
                                +-- has been dequeued (or 'timeout_ms' elapses, if specified).
                                +queueDequeue :: forall component_types m' . (MonadBuild m',
                                +                                             TensorTypes component_types) => 
                                +                Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeue = queueDequeue' id
                                +queueDequeue' :: forall component_types m' . (MonadBuild m',
                                +                                              TensorTypes component_types) =>
                                +                 OpParams ->
                                +                 Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                 -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeue' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "QueueDequeue"
                                +                    & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "components"
                                +  description: "One or more tensors that were dequeued as a tuple."
                                +  type_list_attr: "component_types"
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a tuple."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Dequeues `n` tuples of one or more tensors from the given queue.
                                +--
                                +-- If the queue is closed and there are fewer than `n` elements, then an
                                +-- OutOfRange error is returned.
                                +-- 
                                +-- This operation concatenates queue-element component tensors along the
                                +-- 0th dimension to make a single component tensor.  All of the components
                                +-- in the dequeued tuple will have size `n` in the 0th dimension.
                                +-- 
                                +-- This operation has `k` outputs, where `k` is the number of components in
                                +-- the tuples stored in the given queue, and output `i` is the ith
                                +-- component of the dequeued tuple.
                                +-- 
                                +-- N.B. If the queue is empty, this operation will block until `n` elements
                                +-- have been dequeued (or 'timeout_ms' elapses, if specified).
                                +queueDequeueMany :: forall v'2 component_types m' . (MonadBuild m',
                                +                                                     TensorTypes component_types) =>
                                +                    
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue.
                                +                    -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueMany = queueDequeueMany' id
                                +queueDequeueMany' :: forall v'2 component_types m' . (MonadBuild m',
                                +                                                      TensorTypes component_types) =>
                                +                     OpParams ->
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue.
                                +                     -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueMany' op'options handle n | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs n]
                                +        buildOp [] (opDef "QueueDequeueMany"
                                +                    & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "n"
                                +  description: "The number of tuples to dequeue."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "components"
                                +  description: "One or more tensors that were dequeued as a tuple."
                                +  type_list_attr: "component_types"
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a tuple."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Dequeues `n` tuples of one or more tensors from the given queue.
                                +--
                                +-- If the queue is closed and there are fewer than `n` elements, then an
                                +-- OutOfRange error is returned.
                                +-- 
                                +-- This operation concatenates queue-element component tensors along the
                                +-- 0th dimension to make a single component tensor.  All of the components
                                +-- in the dequeued tuple will have size `n` in the 0th dimension.
                                +-- 
                                +-- This operation has `k` outputs, where `k` is the number of components in
                                +-- the tuples stored in the given queue, and output `i` is the ith
                                +-- component of the dequeued tuple.
                                +-- 
                                +-- N.B. If the queue is empty, this operation will block until `n` elements
                                +-- have been dequeued (or 'timeout_ms' elapses, if specified).
                                +queueDequeueManyV2 :: forall v'1 v'2 component_types m' . (MonadBuild m',
                                +                                                           TensorTypes component_types) =>
                                +                      
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue.
                                +                      -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueManyV2 = queueDequeueManyV2' id
                                +queueDequeueManyV2' :: forall v'1 v'2 component_types m' . (MonadBuild m',
                                +                                                            TensorTypes component_types) =>
                                +                       OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                       -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue.
                                +                       -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueManyV2' op'options handle n | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs n]
                                +        buildOp [] (opDef "QueueDequeueManyV2"
                                +                    & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "n"
                                +  description: "The number of tuples to dequeue."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "components"
                                +  description: "One or more tensors that were dequeued as a tuple."
                                +  type_list_attr: "component_types"
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a tuple."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Dequeues `n` tuples of one or more tensors from the given queue.
                                +--
                                +-- This operation is not supported by all queues.  If a queue does not support
                                +-- DequeueUpTo, then an Unimplemented error is returned.
                                +-- 
                                +-- If the queue is closed and there are more than 0 but less than `n`
                                +-- elements remaining, then instead of returning an OutOfRange error like
                                +-- QueueDequeueMany, less than `n` elements are returned immediately.  If
                                +-- the queue is closed and there are 0 elements left in the queue, then
                                +-- an OutOfRange error is returned just like in QueueDequeueMany.
                                +-- Otherwise the behavior is identical to QueueDequeueMany:
                                +-- 
                                +-- This operation concatenates queue-element component tensors along the
                                +-- 0th dimension to make a single component tensor.  All of the components
                                +-- in the dequeued tuple will have size `n` in the 0th dimension.
                                +-- 
                                +-- This operation has k outputs, where `k` is the number of components in
                                +-- the tuples stored in the given queue, and output `i` is the ith
                                +-- component of the dequeued tuple.
                                +queueDequeueUpTo :: forall v'2 component_types m' . (MonadBuild m',
                                +                                                     TensorTypes component_types) =>
                                +                    
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue.
                                +                    -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueUpTo = queueDequeueUpTo' id
                                +queueDequeueUpTo' :: forall v'2 component_types m' . (MonadBuild m',
                                +                                                      TensorTypes component_types) =>
                                +                     OpParams ->
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue.
                                +                     -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueUpTo' op'options handle n | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs n]
                                +        buildOp [] (opDef "QueueDequeueUpTo"
                                +                    & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "n"
                                +  description: "The number of tuples to dequeue."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "components"
                                +  description: "One or more tensors that were dequeued as a tuple."
                                +  type_list_attr: "component_types"
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a tuple."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Dequeues `n` tuples of one or more tensors from the given queue.
                                +--
                                +-- This operation is not supported by all queues.  If a queue does not support
                                +-- DequeueUpTo, then an Unimplemented error is returned.
                                +-- 
                                +-- If the queue is closed and there are more than 0 but less than `n`
                                +-- elements remaining, then instead of returning an OutOfRange error like
                                +-- QueueDequeueMany, less than `n` elements are returned immediately.  If
                                +-- the queue is closed and there are 0 elements left in the queue, then
                                +-- an OutOfRange error is returned just like in QueueDequeueMany.
                                +-- Otherwise the behavior is identical to QueueDequeueMany:
                                +-- 
                                +-- This operation concatenates queue-element component tensors along the
                                +-- 0th dimension to make a single component tensor.  All of the components
                                +-- in the dequeued tuple will have size n in the 0th dimension.
                                +-- 
                                +-- This operation has `k` outputs, where `k` is the number of components in
                                +-- the tuples stored in the given queue, and output `i` is the ith
                                +-- component of the dequeued tuple.
                                +queueDequeueUpToV2 :: forall v'1 v'2 component_types m' . (MonadBuild m',
                                +                                                           TensorTypes component_types) =>
                                +                      
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue.
                                +                      -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueUpToV2 = queueDequeueUpToV2' id
                                +queueDequeueUpToV2' :: forall v'1 v'2 component_types m' . (MonadBuild m',
                                +                                                            TensorTypes component_types) =>
                                +                       OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                       -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue.
                                +                       -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueUpToV2' op'options handle n | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs n]
                                +        buildOp [] (opDef "QueueDequeueUpToV2"
                                +                    & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "n"
                                +  description: "The number of tuples to dequeue."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "components"
                                +  description: "One or more tensors that were dequeued as a tuple."
                                +  type_list_attr: "component_types"
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a tuple."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Dequeues a tuple of one or more tensors from the given queue.
                                +--
                                +-- This operation has k outputs, where k is the number of components
                                +-- in the tuples stored in the given queue, and output i is the ith
                                +-- component of the dequeued tuple.
                                +-- 
                                +-- N.B. If the queue is empty, this operation will block until an element
                                +-- has been dequeued (or 'timeout_ms' elapses, if specified).
                                +queueDequeueV2 :: forall v'1 component_types m' . (MonadBuild m',
                                +                                                   TensorTypes component_types) =>
                                +                  
                                +                  Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                  -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueV2 = queueDequeueV2' id
                                +queueDequeueV2' :: forall v'1 component_types m' . (MonadBuild m',
                                +                                                    TensorTypes component_types) =>
                                +                   OpParams ->
                                +                   Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                   -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple.
                                +queueDequeueV2' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "QueueDequeueV2"
                                +                    & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg {
                                +  name: "components"
                                +  description: "One or more tensors that were dequeued as a tuple."
                                +  type_list_attr: "component_types"
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a tuple."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Enqueues a tuple of one or more tensors in the given queue.
                                +--
                                +-- The components input has k elements, which correspond to the components of
                                +-- tuples stored in the given queue.
                                +-- 
                                +-- N.B. If the queue is full, this operation will block until the given
                                +-- element has been enqueued (or 'timeout_ms' elapses, if specified).
                                +queueEnqueue :: forall v'2 tcomponents m' . (MonadBuild m',
                                +                                             TensorTypes tcomponents) => 
                                +                Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should be taken.
                                +                -> m' (ControlNode)
                                +queueEnqueue = queueEnqueue' id
                                +queueEnqueue' :: forall v'2 tcomponents m' . (MonadBuild m',
                                +                                              TensorTypes tcomponents) =>
                                +                 OpParams ->
                                +                 Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                 -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should be taken.
                                +                 -> m' (ControlNode)
                                +queueEnqueue' op'options handle components | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs components]
                                +        buildOp [] (opDef "QueueEnqueue"
                                +                    & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "components"
                                +  description: "One or more tensors from which the enqueued tensors should be taken."
                                +  type_list_attr: "Tcomponents"
                                +}
                                +attr {
                                +  name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Enqueues zero or more tuples of one or more tensors in the given queue.
                                +--
                                +-- This operation slices each component tensor along the 0th dimension to
                                +-- make multiple queue elements. All of the tuple components must have the
                                +-- same size in the 0th dimension.
                                +-- 
                                +-- The components input has k elements, which correspond to the components of
                                +-- tuples stored in the given queue.
                                +-- 
                                +-- N.B. If the queue is full, this operation will block until the given
                                +-- elements have been enqueued (or 'timeout_ms' elapses, if specified).
                                +queueEnqueueMany :: forall v'2 tcomponents m' . (MonadBuild m',
                                +                                                 TensorTypes tcomponents) => 
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                    -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should
                                +                                                    -- be taken.
                                +                    -> m' (ControlNode)
                                +queueEnqueueMany = queueEnqueueMany' id
                                +queueEnqueueMany' :: forall v'2 tcomponents m' . (MonadBuild m',
                                +                                                  TensorTypes tcomponents) =>
                                +                     OpParams ->
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                     -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should
                                +                                                     -- be taken.
                                +                     -> m' (ControlNode)
                                +queueEnqueueMany' op'options handle components | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs components]
                                +        buildOp [] (opDef "QueueEnqueueMany"
                                +                    & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "components"
                                +  description: "One or more tensors from which the enqueued tensors should\nbe taken."
                                +  type_list_attr: "Tcomponents"
                                +}
                                +attr {
                                +  name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Enqueues zero or more tuples of one or more tensors in the given queue.
                                +--
                                +-- This operation slices each component tensor along the 0th dimension to
                                +-- make multiple queue elements. All of the tuple components must have the
                                +-- same size in the 0th dimension.
                                +-- 
                                +-- The components input has k elements, which correspond to the components of
                                +-- tuples stored in the given queue.
                                +-- 
                                +-- N.B. If the queue is full, this operation will block until the given
                                +-- elements have been enqueued (or 'timeout_ms' elapses, if specified).
                                +queueEnqueueManyV2 :: forall v'1 v'2 tcomponents m' . (MonadBuild m',
                                +                                                       TensorTypes tcomponents) =>
                                +                      
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                      -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should
                                +                                                      -- be taken.
                                +                      -> m' (ControlNode)
                                +queueEnqueueManyV2 = queueEnqueueManyV2' id
                                +queueEnqueueManyV2' :: forall v'1 v'2 tcomponents m' . (MonadBuild m',
                                +                                                        TensorTypes tcomponents) =>
                                +                       OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                       -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should
                                +                                                       -- be taken.
                                +                       -> m' (ControlNode)
                                +queueEnqueueManyV2' op'options handle components | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs components]
                                +        buildOp [] (opDef "QueueEnqueueManyV2"
                                +                    & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "components"
                                +  description: "One or more tensors from which the enqueued tensors should\nbe taken."
                                +  type_list_attr: "Tcomponents"
                                +}
                                +attr {
                                +  name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Enqueues a tuple of one or more tensors in the given queue.
                                +--
                                +-- The components input has k elements, which correspond to the components of
                                +-- tuples stored in the given queue.
                                +-- 
                                +-- N.B. If the queue is full, this operation will block until the given
                                +-- element has been enqueued (or 'timeout_ms' elapses, if specified).
                                +queueEnqueueV2 :: forall v'1 v'2 tcomponents m' . (MonadBuild m',
                                +                                                   TensorTypes tcomponents) => 
                                +                  Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                  -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should be taken.
                                +                  -> m' (ControlNode)
                                +queueEnqueueV2 = queueEnqueueV2' id
                                +queueEnqueueV2' :: forall v'1 v'2 tcomponents m' . (MonadBuild m',
                                +                                                    TensorTypes tcomponents) =>
                                +                   OpParams ->
                                +                   Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                   -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should be taken.
                                +                   -> m' (ControlNode)
                                +queueEnqueueV2' op'options handle components | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs components]
                                +        buildOp [] (opDef "QueueEnqueueV2"
                                +                    & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "components"
                                +  description: "One or more tensors from which the enqueued tensors should be taken."
                                +  type_list_attr: "Tcomponents"
                                +}
                                +attr {
                                +  name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr {
                                +  name: "timeout_ms"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet."
                                +}
                                +-}
                                +
                                +-- | Returns true if queue is closed.
                                +--
                                +-- This operation returns true if the queue is closed and false if the queue
                                +-- is open.
                                +queueIsClosed :: forall m' . (MonadBuild m') => 
                                +                 Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                 -> m' (Tensor Value Bool) -- ^ __is_closed__
                                +queueIsClosed = queueIsClosed' id
                                +queueIsClosed' :: forall m' . (MonadBuild m') => OpParams ->
                                +                  Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +                  -> m' (Tensor Value Bool) -- ^ __is_closed__
                                +queueIsClosed' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "QueueIsClosed"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg { name: "is_closed" type: DT_BOOL }
                                +-}
                                +
                                +-- | Returns true if queue is closed.
                                +--
                                +-- This operation returns true if the queue is closed and false if the queue
                                +-- is open.
                                +queueIsClosedV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                   Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                   -> m' (Tensor Value Bool) -- ^ __is_closed__
                                +queueIsClosedV2 = queueIsClosedV2' id
                                +queueIsClosedV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                    Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                    -> m' (Tensor Value Bool) -- ^ __is_closed__
                                +queueIsClosedV2' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "QueueIsClosedV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg { name: "is_closed" type: DT_BOOL }
                                +-}
                                +
                                +-- | Computes the number of elements in the given queue.
                                +
                                +queueSize :: forall m' . (MonadBuild m') => 
                                +             Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +             -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue.
                                +queueSize = queueSize' id
                                +queueSize' :: forall m' . (MonadBuild m') => OpParams ->
                                +              Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
                                +              -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue.
                                +queueSize' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "QueueSize"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "The number of elements in the given queue."
                                +  type: DT_INT32
                                +}
                                +-}
                                +
                                +-- | Computes the number of elements in the given queue.
                                +
                                +queueSizeV2 :: forall v'1 m' . (MonadBuild m') => 
                                +               Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +               -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue.
                                +queueSizeV2 = queueSizeV2' id
                                +queueSizeV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
                                +                -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue.
                                +queueSizeV2' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "QueueSizeV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a queue."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "The number of elements in the given queue."
                                +  type: DT_INT32
                                +}
                                +-}
                                +
                                +-- | Real-valued fast Fourier transform.
                                +--
                                +-- Computes the 1-dimensional discrete Fourier transform of a real-valued signal
                                +-- over the inner-most dimension of `input`.
                                +-- 
                                +-- Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
                                +-- `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
                                +-- followed by the `fft_length / 2` positive-frequency terms.
                                +-- 
                                +-- Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
                                +-- corresponding dimension of `input`, the dimension is cropped. If it is larger,
                                +-- the dimension is padded with zeros.
                                +rFFT :: 
                                +        Tensor v'1 Float -- ^ __input__: A float32 tensor.
                                +        -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [1]. The FFT length.
                                +        -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most
                                +        --   dimension of `input` is replaced with the `fft_length / 2 + 1` unique
                                +        --   frequency components of its 1D Fourier transform.
                                +        -- 
                                +        -- @compatibility(numpy)
                                +        -- Equivalent to np.fft.rfft
                                +        -- @end_compatibility
                                +rFFT = rFFT' id
                                +rFFT' :: OpParams ->
                                +         Tensor v'1 Float -- ^ __input__: A float32 tensor.
                                +         -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [1]. The FFT length.
                                +         -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most
                                +         --   dimension of `input` is replaced with the `fft_length / 2 + 1` unique
                                +         --   frequency components of its 1D Fourier transform.
                                +         -- 
                                +         -- @compatibility(numpy)
                                +         -- Equivalent to np.fft.rfft
                                +         -- @end_compatibility
                                +rFFT' op'options input fft_length | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs fft_length]
                                +        return (opDef "RFFT"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A float32 tensor." type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "fft_length"
                                +  description: "An int32 tensor of shape [1]. The FFT length."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same rank as `input`. The inner-most\n  dimension of `input` is replaced with the `fft_length / 2 + 1` unique\n  frequency components of its 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | 2D real-valued fast Fourier transform.
                                +--
                                +-- Computes the 2-dimensional discrete Fourier transform of a real-valued signal
                                +-- over the inner-most 2 dimensions of `input`.
                                +-- 
                                +-- Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
                                +-- `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
                                +-- of `output`: the zero-frequency term, followed by the `fft_length / 2`
                                +-- positive-frequency terms.
                                +-- 
                                +-- Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
                                +-- corresponding dimension of `input`, the dimension is cropped. If it is larger,
                                +-- the dimension is padded with zeros.
                                +rFFT2D :: 
                                +          Tensor v'1 Float -- ^ __input__: A float32 tensor.
                                +          -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [2]. The FFT length for each dimension.
                                +          -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most 2
                                +          --   dimensions of `input` are replaced with their 2D Fourier transform. The
                                +          --   inner-most dimension contains `fft_length / 2 + 1` unique frequency
                                +          --   components.
                                +          -- 
                                +          -- @compatibility(numpy)
                                +          -- Equivalent to np.fft.rfft2
                                +          -- @end_compatibility
                                +rFFT2D = rFFT2D' id
                                +rFFT2D' :: OpParams ->
                                +           Tensor v'1 Float -- ^ __input__: A float32 tensor.
                                +           -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [2]. The FFT length for each dimension.
                                +           -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most 2
                                +           --   dimensions of `input` are replaced with their 2D Fourier transform. The
                                +           --   inner-most dimension contains `fft_length / 2 + 1` unique frequency
                                +           --   components.
                                +           -- 
                                +           -- @compatibility(numpy)
                                +           -- Equivalent to np.fft.rfft2
                                +           -- @end_compatibility
                                +rFFT2D' op'options input fft_length | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs fft_length]
                                +        return (opDef "RFFT2D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A float32 tensor." type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "fft_length"
                                +  description: "An int32 tensor of shape [2]. The FFT length for each dimension."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same rank as `input`. The inner-most 2\n  dimensions of `input` are replaced with their 2D Fourier transform. The\n  inner-most dimension contains `fft_length / 2 + 1` unique frequency\n  components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft2\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | 3D real-valued fast Fourier transform.
                                +--
                                +-- Computes the 3-dimensional discrete Fourier transform of a real-valued signal
                                +-- over the inner-most 3 dimensions of `input`.
                                +-- 
                                +-- Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
                                +-- `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
                                +-- of `output`: the zero-frequency term, followed by the `fft_length / 2`
                                +-- positive-frequency terms.
                                +-- 
                                +-- Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
                                +-- corresponding dimension of `input`, the dimension is cropped. If it is larger,
                                +-- the dimension is padded with zeros.
                                +rFFT3D :: 
                                +          Tensor v'1 Float -- ^ __input__: A float32 tensor.
                                +          -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [3]. The FFT length for each dimension.
                                +          -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most 3
                                +          --   dimensions of `input` are replaced with the their 3D Fourier transform. The
                                +          --   inner-most dimension contains `fft_length / 2 + 1` unique frequency
                                +          --   components.
                                +          -- 
                                +          -- @compatibility(numpy)
                                +          -- Equivalent to np.fft.rfftn with 3 dimensions.
                                +          -- @end_compatibility
                                +rFFT3D = rFFT3D' id
                                +rFFT3D' :: OpParams ->
                                +           Tensor v'1 Float -- ^ __input__: A float32 tensor.
                                +           -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [3]. The FFT length for each dimension.
                                +           -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most 3
                                +           --   dimensions of `input` are replaced with the their 3D Fourier transform. The
                                +           --   inner-most dimension contains `fft_length / 2 + 1` unique frequency
                                +           --   components.
                                +           -- 
                                +           -- @compatibility(numpy)
                                +           -- Equivalent to np.fft.rfftn with 3 dimensions.
                                +           -- @end_compatibility
                                +rFFT3D' op'options input fft_length | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs fft_length]
                                +        return (opDef "RFFT3D"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "A float32 tensor." type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "fft_length"
                                +  description: "An int32 tensor of shape [3]. The FFT length for each dimension."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A complex64 tensor of the same rank as `input`. The inner-most 3\n  dimensions of `input` are replaced with the their 3D Fourier transform. The\n  inner-most dimension contains `fft_length / 2 + 1` unique frequency\n  components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfftn with 3 dimensions.\n@end_compatibility"
                                +  type: DT_COMPLEX64
                                +}
                                +-}
                                +
                                +-- | Converts one or more images from RGB to HSV.
                                +--
                                +-- Outputs a tensor of the same shape as the `images` tensor, containing the HSV
                                +-- value of the pixels. The output is only well defined if the value in `images`
                                +-- are in `[0,1]`.
                                +-- 
                                +-- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
                                +-- `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
                                +-- corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
                                +rGBToHSV :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __images__: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
                                +            -> Tensor Build t -- ^ __output__: `images` converted to HSV.
                                +rGBToHSV = rGBToHSV' id
                                +rGBToHSV' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __images__: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
                                +             -> Tensor Build t -- ^ __output__: `images` converted to HSV.
                                +rGBToHSV' op'options images | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images]
                                +        return (opDef "RGBToHSV"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "1-D or higher rank. RGB data to convert. Last dimension must be size 3."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "`images` converted to HSV."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Randomly crop `image`.
                                +--
                                +-- `size` is a 1-D int64 tensor with 2 elements representing the crop height and
                                +-- width.  The values must be non negative.
                                +-- 
                                +-- This Op picks a random location in `image` and crops a `height` by `width`
                                +-- rectangle from that location.  The random location is picked so the cropped
                                +-- area will fit inside the original image.
                                +randomCrop :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16,
                                +                                                            Data.Int.Int32,
                                +                                                            Data.Int.Int64,
                                +                                                            Data.Int.Int8,
                                +                                                            Data.Word.Word8,
                                +                                                            Double, Float] t) =>
                                +              
                                +              Tensor v'1 t -- ^ __image__: 3-D of shape `[height, width, channels]`.
                                +              -> Tensor v'2 Data.Int.Int64 -- ^ __size__: 1-D of length 2 containing: `crop_height`, `crop_width`..
                                +              -> m' (Tensor Value t) -- ^ __output__: 3-D of shape `[crop_height, crop_width, channels].`
                                +randomCrop = randomCrop' id
                                +randomCrop' :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16,
                                +                                                             Data.Int.Int32,
                                +                                                             Data.Int.Int64,
                                +                                                             Data.Int.Int8,
                                +                                                             Data.Word.Word8,
                                +                                                             Double,
                                +                                                             Float] t) =>
                                +               OpParams ->
                                +               Tensor v'1 t -- ^ __image__: 3-D of shape `[height, width, channels]`.
                                +               -> Tensor v'2 Data.Int.Int64 -- ^ __size__: 1-D of length 2 containing: `crop_height`, `crop_width`..
                                +               -> m' (Tensor Value t) -- ^ __output__: 3-D of shape `[crop_height, crop_width, channels].`
                                +randomCrop' op'options image size | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image,
                                +                                                             buildInputs size]
                                +        buildOp [] (opDef "RandomCrop"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "image"
                                +  description: "3-D of shape `[height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "1-D of length 2 containing: `crop_height`, `crop_width`.."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "3-D of shape `[crop_height, crop_width, channels].`"
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +-}
                                +
                                +-- | Outputs random values from the Gamma distribution(s) described by alpha.
                                +--
                                +-- This op uses the algorithm by Marsaglia et al. to acquire samples via
                                +-- transformation-rejection from pairs of uniform and normal random variables.
                                +-- See http://dl.acm.org/citation.cfm?id=358414
                                +randomGamma :: forall v'1 v'2 s t m' . (MonadBuild m', OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64] s,
                                +                                        OneOf '[Data.Word.Word16, Double,
                                +                                                Float] t) => 
                                +               Tensor v'1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each
                                +                            -- distribution described by the shape parameters given in alpha.
                                +               -> Tensor v'2 t -- ^ __alpha__: A tensor in which each scalar is a "shape" parameter describing the
                                +                               -- associated gamma distribution.
                                +               -> m' (Tensor Value t) -- ^ __output__: A tensor with shape `shape + shape(alpha)`. Each slice
                                +               -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
                                +               -- `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
                                +randomGamma = randomGamma' id
                                +randomGamma' :: forall v'1 v'2 s t m' . (MonadBuild m', OneOf '[Data.Int.Int32,
                                +                                                                Data.Int.Int64] s,
                                +                                         OneOf '[Data.Word.Word16, Double,
                                +                                                 Float] t) => OpParams ->
                                +                Tensor v'1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each
                                +                             -- distribution described by the shape parameters given in alpha.
                                +                -> Tensor v'2 t -- ^ __alpha__: A tensor in which each scalar is a "shape" parameter describing the
                                +                                -- associated gamma distribution.
                                +                -> m' (Tensor Value t) -- ^ __output__: A tensor with shape `shape + shape(alpha)`. Each slice
                                +                -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
                                +                -- `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
                                +randomGamma' op'options shape alpha | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape,
                                +                                                             buildInputs alpha]
                                +        buildOp [] (opDef "RandomGamma"
                                +                    & opAttr "S" .~ tensorType (undefined :: s)
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in alpha."
                                +  type_attr: "S"
                                +}
                                +input_arg {
                                +  name: "alpha"
                                +  description: "A tensor in which each scalar is a \"shape\" parameter describing the\nassociated gamma distribution."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A tensor with shape `shape + shape(alpha)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "S"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Outputs random values from the Poisson distribution(s) described by rate.
                                +--
                                +-- This op uses two algorithms, depending on rate. If rate >= 10, then
                                +-- the algorithm by Hormann is used to acquire samples via
                                +-- transformation-rejection.
                                +-- See http://www.sciencedirect.com/science/article/pii/0167668793909974.
                                +-- 
                                +-- Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
                                +-- random variables.
                                +-- See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
                                +-- Programming, Volume 2. Addison Wesley
                                +randomPoisson :: forall v'1 v'2 s dtype m' . (MonadBuild m',
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] s,
                                +                                              OneOf '[Data.Word.Word16, Double,
                                +                                                      Float] dtype) => 
                                +                 Tensor v'1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each
                                +                              -- distribution described by the shape parameters given in rate.
                                +                 -> Tensor v'2 dtype -- ^ __rate__: A tensor in which each scalar is a "rate" parameter describing the
                                +                                     -- associated poisson distribution.
                                +                 -> m' (Tensor Value dtype) -- ^ __output__: A tensor with shape `shape + shape(rate)`. Each slice
                                +                 -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
                                +                 -- `rate[i0, i1, ...iN]`. The dtype of the output matches the dtype of
                                +                 -- rate.
                                +randomPoisson = randomPoisson' id
                                +randomPoisson' :: forall v'1 v'2 s dtype m' . (MonadBuild m',
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] s,
                                +                                               OneOf '[Data.Word.Word16, Double,
                                +                                                       Float] dtype) =>
                                +                  OpParams ->
                                +                  Tensor v'1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each
                                +                               -- distribution described by the shape parameters given in rate.
                                +                  -> Tensor v'2 dtype -- ^ __rate__: A tensor in which each scalar is a "rate" parameter describing the
                                +                                      -- associated poisson distribution.
                                +                  -> m' (Tensor Value dtype) -- ^ __output__: A tensor with shape `shape + shape(rate)`. Each slice
                                +                  -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
                                +                  -- `rate[i0, i1, ...iN]`. The dtype of the output matches the dtype of
                                +                  -- rate.
                                +randomPoisson' op'options shape rate | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape,
                                +                                                             buildInputs rate]
                                +        buildOp [] (opDef "RandomPoisson"
                                +                    & opAttr "S" .~ tensorType (undefined :: s)
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in rate."
                                +  type_attr: "S"
                                +}
                                +input_arg {
                                +  name: "rate"
                                +  description: "A tensor in which each scalar is a \"rate\" parameter describing the\nassociated poisson distribution."
                                +  type_attr: "dtype"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A tensor with shape `shape + shape(rate)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`rate[i0, i1, ...iN]`. The dtype of the output matches the dtype of\nrate."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "S"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Randomly shuffles a tensor along its first dimension.
                                +--
                                +--   The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
                                +--   to one and only one `output[i]`. For example, a mapping that might occur for a
                                +--   3x2 tensor is:
                                +-- 
                                +-- ```
                                +-- [[1, 2],       [[5, 6],
                                +--  [3, 4],  ==>   [1, 2],
                                +--  [5, 6]]        [3, 4]]
                                +-- ```
                                +randomShuffle :: forall v'1 t m' . (MonadBuild m', TensorType t) => 
                                +                 Tensor v'1 t -- ^ __value__: The tensor to be shuffled.
                                +                 -> m' (Tensor Value t) -- ^ __output__: A tensor of same shape and type as `value`, shuffled along its first
                                +                 -- dimension.
                                +randomShuffle = randomShuffle' id
                                +randomShuffle' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __value__: The tensor to be shuffled.
                                +                  -> m' (Tensor Value t) -- ^ __output__: A tensor of same shape and type as `value`, shuffled along its first
                                +                  -- dimension.
                                +randomShuffle' op'options value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value]
                                +        buildOp [] (opDef "RandomShuffle"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "The tensor to be shuffled."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A tensor of same shape and type as `value`, shuffled along its first\ndimension."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | A queue that randomizes the order of elements.
                                +
                                +randomShuffleQueue :: forall m' . (MonadBuild m') => 
                                +                      [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                      -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
                                +randomShuffleQueue = randomShuffleQueue' id
                                +randomShuffleQueue' :: forall m' . (MonadBuild m') => OpParams ->
                                +                       [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                       -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
                                +randomShuffleQueue' op'options component_types | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "RandomShuffleQueue"
                                +                    & opAttr "component_types" .~ component_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the queue."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
                                +}
                                +attr {
                                +  name: "min_after_dequeue"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements."
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | A queue that randomizes the order of elements.
                                +
                                +randomShuffleQueueV2 :: forall m' . (MonadBuild m') => 
                                +                        [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                        -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue.
                                +randomShuffleQueueV2 = randomShuffleQueueV2' id
                                +randomShuffleQueueV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                         [DataType] -- ^ __component_types__: The type of each component in a value.
                                +                         -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue.
                                +randomShuffleQueueV2' op'options component_types | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "RandomShuffleQueueV2"
                                +                    & opAttr "component_types" .~ component_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the queue."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "component_types"
                                +  type: "list(type)"
                                +  description: "The type of each component in a value."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "shapes"
                                +  type: "list(shape)"
                                +  default_value { list { } }
                                +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
                                +}
                                +attr {
                                +  name: "min_after_dequeue"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements."
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | Outputs random values from a normal distribution.
                                +--
                                +-- The generated values will have mean 0 and standard deviation 1.
                                +randomStandardNormal :: forall v'1 dtype t m' . (MonadBuild m',
                                +                                                 OneOf '[Data.Word.Word16,
                                +                                                         Double, Float] dtype,
                                +                                                 OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int64] t) => 
                                +                        Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                        -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random normal values.
                                +randomStandardNormal = randomStandardNormal' id
                                +randomStandardNormal' :: forall v'1 dtype t m' . (MonadBuild m',
                                +                                                  OneOf '[Data.Word.Word16,
                                +                                                          Double, Float] dtype,
                                +                                                  OneOf '[Data.Int.Int32,
                                +                                                          Data.Int.Int64] t) =>
                                +                         OpParams ->
                                +                         Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                         -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random normal values.
                                +randomStandardNormal' op'options shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape]
                                +        buildOp [] (opDef "RandomStandardNormal"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "The shape of the output tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A tensor of the specified shape filled with random normal values."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the output."
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Outputs random values from a uniform distribution.
                                +--
                                +-- The generated values follow a uniform distribution in the range `[0, 1)`. The
                                +-- lower bound 0 is included in the range, while the upper bound 1 is excluded.
                                +randomUniform :: forall v'1 dtype t m' . (MonadBuild m',
                                +                                          OneOf '[Data.Word.Word16, Double,
                                +                                                  Float] dtype,
                                +                                          OneOf '[Data.Int.Int32,
                                +                                                  Data.Int.Int64] t) => 
                                +                 Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                 -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with uniform random values.
                                +randomUniform = randomUniform' id
                                +randomUniform' :: forall v'1 dtype t m' . (MonadBuild m',
                                +                                           OneOf '[Data.Word.Word16, Double,
                                +                                                   Float] dtype,
                                +                                           OneOf '[Data.Int.Int32,
                                +                                                   Data.Int.Int64] t) =>
                                +                  OpParams ->
                                +                  Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                  -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with uniform random values.
                                +randomUniform' op'options shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape]
                                +        buildOp [] (opDef "RandomUniform"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "The shape of the output tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A tensor of the specified shape filled with uniform random values."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the output."
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Outputs random integers from a uniform distribution.
                                +--
                                +-- The generated values are uniform integers in the range `[minval, maxval)`.
                                +-- The lower bound `minval` is included in the range, while the upper bound
                                +-- `maxval` is excluded.
                                +-- 
                                +-- The random integers are slightly biased unless `maxval - minval` is an exact
                                +-- power of two.  The bias is small for values of `maxval - minval` significantly
                                +-- smaller than the range of the output (either `2^32` or `2^64`).
                                +randomUniformInt :: forall v'1 v'2 v'3 tout t m' . (MonadBuild m',
                                +                                                    OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] tout,
                                +                                                    OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] t) =>
                                +                    
                                +                    Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                    -> Tensor v'2 tout -- ^ __minval__: 0-D.  Inclusive lower bound on the generated integers.
                                +                    -> Tensor v'3 tout -- ^ __maxval__: 0-D.  Exclusive upper bound on the generated integers.
                                +                    -> m' (Tensor Value tout) -- ^ __output__: A tensor of the specified shape filled with uniform random integers.
                                +randomUniformInt = randomUniformInt' id
                                +randomUniformInt' :: forall v'1 v'2 v'3 tout t m' . (MonadBuild m',
                                +                                                     OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] tout,
                                +                                                     OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] t) =>
                                +                     OpParams ->
                                +                     Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                     -> Tensor v'2 tout -- ^ __minval__: 0-D.  Inclusive lower bound on the generated integers.
                                +                     -> Tensor v'3 tout -- ^ __maxval__: 0-D.  Exclusive upper bound on the generated integers.
                                +                     -> m' (Tensor Value tout) -- ^ __output__: A tensor of the specified shape filled with uniform random integers.
                                +randomUniformInt' op'options shape minval maxval | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape,
                                +                                                             buildInputs minval,
                                +                                                             buildInputs maxval]
                                +        buildOp [] (opDef "RandomUniformInt"
                                +                    & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "The shape of the output tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "minval"
                                +  description: "0-D.  Inclusive lower bound on the generated integers."
                                +  type_attr: "Tout"
                                +}
                                +input_arg {
                                +  name: "maxval"
                                +  description: "0-D.  Exclusive upper bound on the generated integers."
                                +  type_attr: "Tout"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A tensor of the specified shape filled with uniform random integers."
                                +  type_attr: "Tout"
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "Tout"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Creates a sequence of numbers.
                                +--
                                +-- This operation creates a sequence of numbers that begins at `start` and
                                +-- extends by increments of `delta` up to but not including `limit`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'start' is 3
                                +-- # 'limit' is 18
                                +-- # 'delta' is 3
                                +-- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
                                +-- ```
                                +range :: forall v'1 v'2 v'3 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                            Double, Float] tidx) => 
                                +         Tensor v'1 tidx -- ^ __start__: 0-D (scalar). First entry in the sequence.
                                +         -> Tensor v'2 tidx -- ^ __limit__: 0-D (scalar). Upper limit of sequence, exclusive.
                                +         -> Tensor v'3 tidx -- ^ __delta__: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
                                +         -> Tensor Build tidx -- ^ __output__: 1-D.
                                +range = range' id
                                +range' :: forall v'1 v'2 v'3 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                             Double, Float] tidx) => OpParams ->
                                +          Tensor v'1 tidx -- ^ __start__: 0-D (scalar). First entry in the sequence.
                                +          -> Tensor v'2 tidx -- ^ __limit__: 0-D (scalar). Upper limit of sequence, exclusive.
                                +          -> Tensor v'3 tidx -- ^ __delta__: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
                                +          -> Tensor Build tidx -- ^ __output__: 1-D.
                                +range' op'options start limit delta | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs start,
                                +                                                             buildInputs limit,
                                +                                                             buildInputs delta]
                                +        return (opDef "Range"
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "start"
                                +  description: "0-D (scalar). First entry in the sequence."
                                +  type_attr: "Tidx"
                                +}
                                +input_arg {
                                +  name: "limit"
                                +  description: "0-D (scalar). Upper limit of sequence, exclusive."
                                +  type_attr: "Tidx"
                                +}
                                +input_arg {
                                +  name: "delta"
                                +  description: "0-D (scalar). Optional. Default is 1. Number that increments `start`."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg { name: "output" description: "1-D." type_attr: "Tidx" }
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset with a range of values. Corresponds to python's xrange.
                                +
                                +rangeDataset :: forall v'1 v'2 v'3 m' . (MonadBuild m') => 
                                +                [DataType] -- ^ __output_types__
                                +                -> Tensor v'1 Data.Int.Int64 -- ^ __start__: corresponds to start in python's xrange().
                                +                -> Tensor v'2 Data.Int.Int64 -- ^ __stop__: corresponds to stop in python's xrange().
                                +                -> Tensor v'3 Data.Int.Int64 -- ^ __step__: corresponds to step in python's xrange().
                                +                -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +rangeDataset = rangeDataset' id
                                +rangeDataset' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams ->
                                +                 [DataType] -- ^ __output_types__
                                +                 -> Tensor v'1 Data.Int.Int64 -- ^ __start__: corresponds to start in python's xrange().
                                +                 -> Tensor v'2 Data.Int.Int64 -- ^ __stop__: corresponds to stop in python's xrange().
                                +                 -> Tensor v'3 Data.Int.Int64 -- ^ __step__: corresponds to step in python's xrange().
                                +                 -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +rangeDataset' op'options output_types start stop step | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs start,
                                +                                                             buildInputs stop,
                                +                                                             buildInputs step]
                                +        buildOp [] (opDef "RangeDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "start"
                                +  description: "corresponds to start in python\'s xrange()."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "stop"
                                +  description: "corresponds to stop in python\'s xrange()."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "step"
                                +  description: "corresponds to step in python\'s xrange()."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Returns the rank of a tensor.
                                +--
                                +-- This operation returns an integer representing the rank of `input`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
                                +-- # shape of tensor 't' is [2, 2, 3]
                                +-- rank(t) ==> 3
                                +-- ```
                                +-- 
                                +-- **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
                                +-- of a tensor is the number of indices required to uniquely select each element
                                +-- of the tensor. Rank is also known as "order", "degree", or "ndims."
                                +rank :: forall v'1 t . (TensorType t) => 
                                +        Tensor v'1 t -- ^ __input__
                                +        -> Tensor Build Data.Int.Int32 -- ^ __output__
                                +rank = rank' id
                                +rank' :: forall v'1 t . (TensorType t) => OpParams ->
                                +         Tensor v'1 t -- ^ __input__
                                +         -> Tensor Build Data.Int.Int32 -- ^ __output__
                                +rank' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Rank"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type: DT_INT32 }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Reads and outputs the entire contents of the input filename.
                                +
                                +readFile :: 
                                +            Tensor v'1 Data.ByteString.ByteString -- ^ __filename__
                                +            -> Tensor Build Data.ByteString.ByteString -- ^ __contents__
                                +readFile = readFile' id
                                +readFile' :: OpParams ->
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __filename__
                                +             -> Tensor Build Data.ByteString.ByteString -- ^ __contents__
                                +readFile' op'options filename | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename]
                                +        return (opDef "ReadFile"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "filename" type: DT_STRING }
                                +output_arg { name: "contents" type: DT_STRING }
                                +-}
                                +
                                +-- | Reads the value of a variable.
                                +--
                                +-- The tensor returned by this operation is immutable.
                                +-- 
                                +-- The value returned by this operation is guaranteed to be influenced by all the
                                +-- writes on which this operation depends directly or indirectly, and to not be
                                +-- influenced by any of the writes which depend directly or indirectly on this
                                +-- operation.
                                +readVariableOp :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => 
                                +                  Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                  -> m' (Tensor Value dtype) -- ^ __value__
                                +readVariableOp = readVariableOp' id
                                +readVariableOp' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) =>
                                +                   OpParams ->
                                +                   Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                   -> m' (Tensor Value dtype) -- ^ __value__
                                +readVariableOp' op'options resource | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource]
                                +        buildOp [] (opDef "ReadVariableOp"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource"
                                +  description: "handle to the resource in which to store the variable."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg { name: "value" type_attr: "dtype" }
                                +attr {
                                +  name: "dtype" type: "type" description: "the dtype of the value."
                                +}
                                +-}
                                +
                                +-- | Returns the number of records this Reader has produced.
                                +--
                                +-- This is the same as the number of ReaderRead executions that have
                                +-- succeeded.
                                +readerNumRecordsProduced :: forall m' . (MonadBuild m') => 
                                +                            Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                            -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__
                                +readerNumRecordsProduced = readerNumRecordsProduced' id
                                +readerNumRecordsProduced' :: forall m' . (MonadBuild m') => OpParams ->
                                +                             Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                             -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__
                                +readerNumRecordsProduced' op'options reader_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle]
                                +        buildOp [] (opDef "ReaderNumRecordsProduced"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg { name: "records_produced" type: DT_INT64 }
                                +-}
                                +
                                +-- | Returns the number of records this Reader has produced.
                                +--
                                +-- This is the same as the number of ReaderRead executions that have
                                +-- succeeded.
                                +readerNumRecordsProducedV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                              Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                              -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__
                                +readerNumRecordsProducedV2 = readerNumRecordsProducedV2' id
                                +readerNumRecordsProducedV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                               Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                               -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__
                                +readerNumRecordsProducedV2' op'options reader_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle]
                                +        buildOp [] (opDef "ReaderNumRecordsProducedV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg { name: "records_produced" type: DT_INT64 }
                                +-}
                                +
                                +-- | Returns the number of work units this Reader has finished processing.
                                +
                                +readerNumWorkUnitsCompleted :: forall m' . (MonadBuild m') => 
                                +                               Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                               -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__
                                +readerNumWorkUnitsCompleted = readerNumWorkUnitsCompleted' id
                                +readerNumWorkUnitsCompleted' :: forall m' . (MonadBuild m') => OpParams ->
                                +                                Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                                -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__
                                +readerNumWorkUnitsCompleted' op'options reader_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle]
                                +        buildOp [] (opDef "ReaderNumWorkUnitsCompleted"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg { name: "units_completed" type: DT_INT64 }
                                +-}
                                +
                                +-- | Returns the number of work units this Reader has finished processing.
                                +
                                +readerNumWorkUnitsCompletedV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                                 Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                                 -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__
                                +readerNumWorkUnitsCompletedV2 = readerNumWorkUnitsCompletedV2' id
                                +readerNumWorkUnitsCompletedV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                                  Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                                  -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__
                                +readerNumWorkUnitsCompletedV2' op'options reader_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle]
                                +        buildOp [] (opDef "ReaderNumWorkUnitsCompletedV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg { name: "units_completed" type: DT_INT64 }
                                +-}
                                +
                                +-- | Returns the next record (key, value pair) produced by a Reader.
                                +--
                                +-- Will dequeue from the input queue if necessary (e.g. when the
                                +-- Reader needs to start reading from a new file since it has finished
                                +-- with the previous file).
                                +readerRead :: forall m' . (MonadBuild m') => 
                                +              Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +              -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a Queue, with string work items.
                                +              -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                      Tensor Value Data.ByteString.ByteString))
                                +              -- ^ (__key__, __value__)
                                +              --
                                +              -- * __key__: A scalar.
                                +              --
                                +              -- * __value__: A scalar.
                                +readerRead = readerRead' id
                                +readerRead' :: forall m' . (MonadBuild m') => OpParams ->
                                +               Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +               -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a Queue, with string work items.
                                +               -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                       Tensor Value Data.ByteString.ByteString))
                                +               -- ^ (__key__, __value__)
                                +               --
                                +               -- * __key__: A scalar.
                                +               --
                                +               -- * __value__: A scalar.
                                +readerRead' op'options reader_handle queue_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle,
                                +                                                             buildInputs queue_handle]
                                +        buildOp [] (opDef "ReaderRead"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "queue_handle"
                                +  description: "Handle to a Queue, with string work items."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg { name: "key" description: "A scalar." type: DT_STRING }
                                +output_arg {
                                +  name: "value" description: "A scalar." type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Returns up to `num_records` (key, value) pairs produced by a Reader.
                                +--
                                +-- Will dequeue from the input queue if necessary (e.g. when the
                                +-- Reader needs to start reading from a new file since it has finished
                                +-- with the previous file).
                                +-- It may return less than `num_records` even before the last batch.
                                +readerReadUpTo :: forall v'3 m' . (MonadBuild m') => 
                                +                  Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a `Reader`.
                                +                  -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a `Queue`, with string work items.
                                +                  -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`.
                                +                  -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                          Tensor Value Data.ByteString.ByteString))
                                +                  -- ^ (__keys__, __values__)
                                +                  --
                                +                  -- * __keys__: A 1-D tensor.
                                +                  --
                                +                  -- * __values__: A 1-D tensor.
                                +readerReadUpTo = readerReadUpTo' id
                                +readerReadUpTo' :: forall v'3 m' . (MonadBuild m') => OpParams ->
                                +                   Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a `Reader`.
                                +                   -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a `Queue`, with string work items.
                                +                   -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`.
                                +                   -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                           Tensor Value Data.ByteString.ByteString))
                                +                   -- ^ (__keys__, __values__)
                                +                   --
                                +                   -- * __keys__: A 1-D tensor.
                                +                   --
                                +                   -- * __values__: A 1-D tensor.
                                +readerReadUpTo' op'options reader_handle queue_handle
                                +                num_records | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle,
                                +                                                             buildInputs queue_handle,
                                +                                                             buildInputs num_records]
                                +        buildOp [] (opDef "ReaderReadUpTo"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a `Reader`."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "queue_handle"
                                +  description: "Handle to a `Queue`, with string work items."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "num_records"
                                +  description: "number of records to read from `Reader`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "keys" description: "A 1-D tensor." type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "values" description: "A 1-D tensor." type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Returns up to `num_records` (key, value) pairs produced by a Reader.
                                +--
                                +-- Will dequeue from the input queue if necessary (e.g. when the
                                +-- Reader needs to start reading from a new file since it has finished
                                +-- with the previous file).
                                +-- It may return less than `num_records` even before the last batch.
                                +readerReadUpToV2 :: forall v'1 v'2 v'3 m' . (MonadBuild m') => 
                                +                    Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a `Reader`.
                                +                    -> Tensor v'2 ResourceHandle -- ^ __queue_handle__: Handle to a `Queue`, with string work items.
                                +                    -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`.
                                +                    -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                            Tensor Value Data.ByteString.ByteString))
                                +                    -- ^ (__keys__, __values__)
                                +                    --
                                +                    -- * __keys__: A 1-D tensor.
                                +                    --
                                +                    -- * __values__: A 1-D tensor.
                                +readerReadUpToV2 = readerReadUpToV2' id
                                +readerReadUpToV2' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams ->
                                +                     Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a `Reader`.
                                +                     -> Tensor v'2 ResourceHandle -- ^ __queue_handle__: Handle to a `Queue`, with string work items.
                                +                     -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`.
                                +                     -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                             Tensor Value Data.ByteString.ByteString))
                                +                     -- ^ (__keys__, __values__)
                                +                     --
                                +                     -- * __keys__: A 1-D tensor.
                                +                     --
                                +                     -- * __values__: A 1-D tensor.
                                +readerReadUpToV2' op'options reader_handle queue_handle
                                +                  num_records | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle,
                                +                                                             buildInputs queue_handle,
                                +                                                             buildInputs num_records]
                                +        buildOp [] (opDef "ReaderReadUpToV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a `Reader`."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "queue_handle"
                                +  description: "Handle to a `Queue`, with string work items."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "num_records"
                                +  description: "number of records to read from `Reader`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "keys" description: "A 1-D tensor." type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "values" description: "A 1-D tensor." type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Returns the next record (key, value pair) produced by a Reader.
                                +--
                                +-- Will dequeue from the input queue if necessary (e.g. when the
                                +-- Reader needs to start reading from a new file since it has finished
                                +-- with the previous file).
                                +readerReadV2 :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                -> Tensor v'2 ResourceHandle -- ^ __queue_handle__: Handle to a Queue, with string work items.
                                +                -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                        Tensor Value Data.ByteString.ByteString))
                                +                -- ^ (__key__, __value__)
                                +                --
                                +                -- * __key__: A scalar.
                                +                --
                                +                -- * __value__: A scalar.
                                +readerReadV2 = readerReadV2' id
                                +readerReadV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                 Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                 -> Tensor v'2 ResourceHandle -- ^ __queue_handle__: Handle to a Queue, with string work items.
                                +                 -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                         Tensor Value Data.ByteString.ByteString))
                                +                 -- ^ (__key__, __value__)
                                +                 --
                                +                 -- * __key__: A scalar.
                                +                 --
                                +                 -- * __value__: A scalar.
                                +readerReadV2' op'options reader_handle queue_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle,
                                +                                                             buildInputs queue_handle]
                                +        buildOp [] (opDef "ReaderReadV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "queue_handle"
                                +  description: "Handle to a Queue, with string work items."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg { name: "key" description: "A scalar." type: DT_STRING }
                                +output_arg {
                                +  name: "value" description: "A scalar." type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Restore a Reader to its initial clean state.
                                +
                                +readerReset :: forall m' . (MonadBuild m') => 
                                +               Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +               -> m' (ControlNode)
                                +readerReset = readerReset' id
                                +readerReset' :: forall m' . (MonadBuild m') => OpParams ->
                                +                Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                -> m' (ControlNode)
                                +readerReset' op'options reader_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle]
                                +        buildOp [] (opDef "ReaderReset"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +-}
                                +
                                +-- | Restore a Reader to its initial clean state.
                                +
                                +readerResetV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                 Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                 -> m' (ControlNode)
                                +readerResetV2 = readerResetV2' id
                                +readerResetV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                  Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                  -> m' (ControlNode)
                                +readerResetV2' op'options reader_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle]
                                +        buildOp [] (opDef "ReaderResetV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_RESOURCE
                                +}
                                +-}
                                +
                                +-- | Restore a reader to a previously saved state.
                                +--
                                +-- Not all Readers support being restored, so this can produce an
                                +-- Unimplemented error.
                                +readerRestoreState :: forall v'2 m' . (MonadBuild m') => 
                                +                      Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                      -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type
                                +                                                               -- matching reader_handle.
                                +                      -> m' (ControlNode)
                                +readerRestoreState = readerRestoreState' id
                                +readerRestoreState' :: forall v'2 m' . (MonadBuild m') => OpParams ->
                                +                       Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                       -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type
                                +                                                                -- matching reader_handle.
                                +                       -> m' (ControlNode)
                                +readerRestoreState' op'options reader_handle state | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle,
                                +                                                             buildInputs state]
                                +        buildOp [] (opDef "ReaderRestoreState"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "state"
                                +  description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle."
                                +  type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Restore a reader to a previously saved state.
                                +--
                                +-- Not all Readers support being restored, so this can produce an
                                +-- Unimplemented error.
                                +readerRestoreStateV2 :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                        Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                        -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type
                                +                                                                 -- matching reader_handle.
                                +                        -> m' (ControlNode)
                                +readerRestoreStateV2 = readerRestoreStateV2' id
                                +readerRestoreStateV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                         Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                         -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type
                                +                                                                  -- matching reader_handle.
                                +                         -> m' (ControlNode)
                                +readerRestoreStateV2' op'options reader_handle state | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle,
                                +                                                             buildInputs state]
                                +        buildOp [] (opDef "ReaderRestoreStateV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "state"
                                +  description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle."
                                +  type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Produce a string tensor that encodes the state of a Reader.
                                +--
                                +-- Not all Readers support being serialized, so this can produce an
                                +-- Unimplemented error.
                                +readerSerializeState :: forall m' . (MonadBuild m') => 
                                +                        Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                        -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__
                                +readerSerializeState = readerSerializeState' id
                                +readerSerializeState' :: forall m' . (MonadBuild m') => OpParams ->
                                +                         Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
                                +                         -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__
                                +readerSerializeState' op'options reader_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle]
                                +        buildOp [] (opDef "ReaderSerializeState"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +output_arg { name: "state" type: DT_STRING }
                                +-}
                                +
                                +-- | Produce a string tensor that encodes the state of a Reader.
                                +--
                                +-- Not all Readers support being serialized, so this can produce an
                                +-- Unimplemented error.
                                +readerSerializeStateV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                          Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                          -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__
                                +readerSerializeStateV2 = readerSerializeStateV2' id
                                +readerSerializeStateV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                           Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader.
                                +                           -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__
                                +readerSerializeStateV2' op'options reader_handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle]
                                +        buildOp [] (opDef "ReaderSerializeStateV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reader_handle"
                                +  description: "Handle to a Reader."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg { name: "state" type: DT_STRING }
                                +-}
                                +
                                +-- | Returns the real part of a complex number.
                                +--
                                +-- Given a tensor `input` of complex numbers, this operation returns a tensor of
                                +-- type `float` that is the real part of each element in `input`. All elements in
                                +-- `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
                                +--  part returned by this operation and *b* is the imaginary part.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
                                +-- tf.real(input) ==> [-2.25, 3.25]
                                +-- ```
                                +real :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float)] t,
                                +                             OneOf '[Double, Float] tout) => 
                                +        Tensor v'1 t -- ^ __input__
                                +        -> Tensor Build tout -- ^ __output__
                                +real = real' id
                                +real' :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float)] t,
                                +                              OneOf '[Double, Float] tout) => OpParams ->
                                +         Tensor v'1 t -- ^ __input__
                                +         -> Tensor Build tout -- ^ __output__
                                +real' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Real"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tout" .~ tensorType (undefined :: tout)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "Tout" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_COMPLEX64 }
                                +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
                                +}
                                +attr {
                                +  name: "Tout"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Returns x / y element-wise for real types.
                                +--
                                +-- If `x` and `y` are reals, this will return the floating-point division.
                                +-- 
                                +-- *NOTE*: `Div` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +realDiv :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor v'2 t -- ^ __y__
                                +           -> Tensor Build t -- ^ __z__
                                +realDiv = realDiv' id
                                +realDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => OpParams ->
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build t -- ^ __z__
                                +realDiv' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "RealDiv"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the reciprocal of x element-wise.
                                +--
                                +-- I.e., \\(y = 1 / x\\).
                                +reciprocal :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float),
                                +                                      Data.Int.Int32, Data.Int.Int64,
                                +                                      Data.Word.Word16, Double, Float] t) => 
                                +              Tensor v'1 t -- ^ __x__
                                +              -> Tensor Build t -- ^ __y__
                                +reciprocal = reciprocal' id
                                +reciprocal' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int32, Data.Int.Int64,
                                +                                       Data.Word.Word16, Double, Float] t) =>
                                +               OpParams ->
                                +               Tensor v'1 t -- ^ __x__
                                +               -> Tensor Build t -- ^ __y__
                                +reciprocal' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Reciprocal"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient for the inverse of `x` wrt its input.
                                +--
                                +-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
                                +-- is the corresponding input gradient.
                                +reciprocalGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                              (Data.Complex.Complex Float),
                                +                                              Data.Word.Word16, Double,
                                +                                              Float] t) => 
                                +                  Tensor v'1 t -- ^ __x__
                                +                  -> Tensor v'2 t -- ^ __y__
                                +                  -> Tensor Build t -- ^ __z__
                                +reciprocalGrad = reciprocalGrad' id
                                +reciprocalGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                               (Data.Complex.Complex Float),
                                +                                               Data.Word.Word16, Double,
                                +                                               Float] t) => OpParams ->
                                +                   Tensor v'1 t -- ^ __x__
                                +                   -> Tensor v'2 t -- ^ __y__
                                +                   -> Tensor Build t -- ^ __z__
                                +reciprocalGrad' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "ReciprocalGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Emits randomized records.
                                +
                                +recordInput :: forall m' . (MonadBuild m') => 
                                +               m' (Tensor Value Data.ByteString.ByteString) -- ^ __records__: A tensor of shape [batch_size].
                                +recordInput = recordInput' id
                                +recordInput' :: forall m' . (MonadBuild m') => OpParams ->
                                +                m' (Tensor Value Data.ByteString.ByteString) -- ^ __records__: A tensor of shape [batch_size].
                                +recordInput' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "RecordInput"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "records"
                                +  description: "A tensor of shape [batch_size]."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "file_pattern"
                                +  type: "string"
                                +  description: "Glob pattern for the data files."
                                +}
                                +attr {
                                +  name: "file_random_seed"
                                +  type: "int"
                                +  default_value { i: 301 }
                                +  description: "Random seeds used to produce randomized records."
                                +}
                                +attr {
                                +  name: "file_shuffle_shift_ratio"
                                +  type: "float"
                                +  default_value { f: 0.0 }
                                +  description: "Shifts the list of files after the list is randomly\nshuffled."
                                +}
                                +attr {
                                +  name: "file_buffer_size"
                                +  type: "int"
                                +  default_value { i: 10000 }
                                +  description: "The randomization shuffling buffer."
                                +}
                                +attr {
                                +  name: "file_parallelism"
                                +  type: "int"
                                +  default_value { i: 16 }
                                +  description: "How many sstables are opened and concurrently iterated over."
                                +}
                                +attr {
                                +  name: "batch_size"
                                +  type: "int"
                                +  default_value { i: 32 }
                                +  description: "The batch size."
                                +}
                                +-}
                                +
                                +-- | Joins a string Tensor across the given dimensions.
                                +--
                                +-- Computes the string join across dimensions in the given string Tensor of shape
                                +-- `[d_0, d_1, ..., d_n-1]`.  Returns a new Tensor created by joining the input
                                +-- strings with the given separator (default: empty string).  Negative indices are
                                +-- counted backwards from the end, with `-1` being equivalent to `n - 1`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```python
                                +-- # tensor `a` is [["a", "b"], ["c", "d"]]
                                +-- tf.reduce_join(a, 0) ==> ["ac", "bd"]
                                +-- tf.reduce_join(a, 1) ==> ["ab", "cd"]
                                +-- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
                                +-- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
                                +-- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
                                +-- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
                                +-- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
                                +-- tf.reduce_join(a, [0, 1]) ==> ["acbd"]
                                +-- tf.reduce_join(a, [1, 0]) ==> ["abcd"]
                                +-- tf.reduce_join(a, []) ==> ["abcd"]
                                +-- ```
                                +reduceJoin :: 
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __inputs__: The input to be joined.  All reduced indices must have non-zero size.
                                +              -> Tensor v'2 Data.Int.Int32 -- ^ __reduction_indices__: The dimensions to reduce over.  Dimensions are reduced in the
                                +                                           -- order specified.  Omitting `reduction_indices` is equivalent to passing
                                +                                           -- `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
                                +              -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Has shape equal to that of the input with reduced dimensions removed or
                                +              -- set to `1` depending on `keep_dims`.
                                +reduceJoin = reduceJoin' id
                                +reduceJoin' :: OpParams ->
                                +               Tensor v'1 Data.ByteString.ByteString -- ^ __inputs__: The input to be joined.  All reduced indices must have non-zero size.
                                +               -> Tensor v'2 Data.Int.Int32 -- ^ __reduction_indices__: The dimensions to reduce over.  Dimensions are reduced in the
                                +                                            -- order specified.  Omitting `reduction_indices` is equivalent to passing
                                +                                            -- `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
                                +               -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Has shape equal to that of the input with reduced dimensions removed or
                                +               -- set to `1` depending on `keep_dims`.
                                +reduceJoin' op'options inputs reduction_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs,
                                +                                                             buildInputs reduction_indices]
                                +        return (opDef "ReduceJoin"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "The input to be joined.  All reduced indices must have non-zero size."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "reduction_indices"
                                +  description: "The dimensions to reduce over.  Dimensions are reduced in the\norder specified.  Omitting `reduction_indices` is equivalent to passing\n`[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has shape equal to that of the input with reduced dimensions removed or\nset to `1` depending on `keep_dims`."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, retain reduced dimensions with length `1`."
                                +}
                                +attr {
                                +  name: "separator"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The separator to use when joining."
                                +}
                                +-}
                                +
                                +-- | Creates or finds a child frame, and makes `data` available to the child frame.
                                +--
                                +-- The unique `frame_name` is used by the `Executor` to identify frames. If
                                +-- `is_constant` is true, `output` is a constant in the child frame; otherwise
                                +-- it may be changed in the child frame. At most `parallel_iterations` iterations
                                +-- are run in parallel in the child frame.
                                +refEnter :: forall t m' . (MonadBuild m', TensorType t) => 
                                +            Tensor Ref t -- ^ __data__: The tensor to be made available to the child frame.
                                +            -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
                                +refEnter = refEnter' id
                                +refEnter' :: forall t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +             Tensor Ref t -- ^ __data__: The tensor to be made available to the child frame.
                                +             -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
                                +refEnter' op'options data' | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data']
                                +        buildOp [] (opDef "RefEnter"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "data"
                                +  description: "The tensor to be made available to the child frame."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same tensor as `data`."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "frame_name"
                                +  type: "string"
                                +  description: "The name of the child frame."
                                +}
                                +attr {
                                +  name: "is_constant"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, the output is constant within the child frame."
                                +}
                                +attr {
                                +  name: "parallel_iterations"
                                +  type: "int"
                                +  default_value { i: 10 }
                                +  description: "The number of iterations allowed to run in parallel."
                                +}
                                +-}
                                +
                                +-- | Exits the current frame to its parent frame.
                                +--
                                +-- Exit makes its input `data` available to the parent frame.
                                +refExit :: forall t m' . (MonadBuild m', TensorType t) => 
                                +           Tensor Ref t -- ^ __data__: The tensor to be made available to the parent frame.
                                +           -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
                                +refExit = refExit' id
                                +refExit' :: forall t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +            Tensor Ref t -- ^ __data__: The tensor to be made available to the parent frame.
                                +            -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
                                +refExit' op'options data' | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data']
                                +        buildOp [] (opDef "RefExit"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "data"
                                +  description: "The tensor to be made available to the parent frame."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same tensor as `data`."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Return the same ref tensor as the input ref tensor.
                                +
                                +refIdentity :: forall t m' . (MonadBuild m', TensorType t) => 
                                +               Tensor Ref t -- ^ __input__
                                +               -> m' (Tensor Ref t) -- ^ __output__
                                +refIdentity = refIdentity' id
                                +refIdentity' :: forall t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +                Tensor Ref t -- ^ __input__
                                +                -> m' (Tensor Ref t) -- ^ __output__
                                +refIdentity' op'options input | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        buildOp [] (opDef "RefIdentity"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" is_ref: true }
                                +output_arg { name: "output" type_attr: "T" is_ref: true }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Forwards the value of an available tensor from `inputs` to `output`.
                                +--
                                +-- `Merge` waits for at least one of the tensors in `inputs` to become available.
                                +-- It is usually combined with `Switch` to implement branching.
                                +-- 
                                +-- `Merge` forwards the first tensor for become available to `output`, and sets
                                +-- `value_index` to its index in `inputs`.
                                +refMerge :: forall t m' . (MonadBuild m', TensorType t) => 
                                +            [Tensor Ref t] -- ^ __inputs__: The input tensors, exactly one of which will become available.
                                +            -> m' ((Tensor Ref t, Tensor Value Data.Int.Int32))
                                +            -- ^ (__output__, __value_index__)
                                +            --
                                +            -- * __output__: Will be set to the available input tensor.
                                +            --
                                +            -- * __value_index__: The index of the chosen input tensor in `inputs`.
                                +refMerge = refMerge' id
                                +refMerge' :: forall t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +             [Tensor Ref t] -- ^ __inputs__: The input tensors, exactly one of which will become available.
                                +             -> m' ((Tensor Ref t, Tensor Value Data.Int.Int32))
                                +             -- ^ (__output__, __value_index__)
                                +             --
                                +             -- * __output__: Will be set to the available input tensor.
                                +             --
                                +             -- * __value_index__: The index of the chosen input tensor in `inputs`.
                                +refMerge' op'options
                                +          inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
                                +        buildOp [] (opDef "RefMerge"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "N" .~ n
                                +                    & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length inputs) :: Int64
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "The input tensors, exactly one of which will become available."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Will be set to the available input tensor."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "value_index"
                                +  description: "The index of the chosen input tensor in `inputs`."
                                +  type: DT_INT32
                                +}
                                +attr { name: "T" type: "type" }
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | Makes its input available to the next iteration.
                                +
                                +refNextIteration :: forall t m' . (MonadBuild m', TensorType t) => 
                                +                    Tensor Ref t -- ^ __data__: The tensor to be made available to the next iteration.
                                +                    -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
                                +refNextIteration = refNextIteration' id
                                +refNextIteration' :: forall t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +                     Tensor Ref t -- ^ __data__: The tensor to be made available to the next iteration.
                                +                     -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
                                +refNextIteration' op'options data' | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data']
                                +        buildOp [] (opDef "RefNextIteration"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "data"
                                +  description: "The tensor to be made available to the next iteration."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same tensor as `data`."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Forwards the `index`th element of `inputs` to `output`.
                                +
                                +refSelect :: forall v'1 t m' . (MonadBuild m', TensorType t) => 
                                +             Tensor v'1 Data.Int.Int32 -- ^ __index__: A scalar that determines the input that gets selected.
                                +             -> [Tensor Ref t] -- ^ __inputs__: A list of ref tensors, one of which will be forwarded to `output`.
                                +             -> m' (Tensor Ref t) -- ^ __output__: The forwarded tensor.
                                +refSelect = refSelect' id
                                +refSelect' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +              Tensor v'1 Data.Int.Int32 -- ^ __index__: A scalar that determines the input that gets selected.
                                +              -> [Tensor Ref t] -- ^ __inputs__: A list of ref tensors, one of which will be forwarded to `output`.
                                +              -> m' (Tensor Ref t) -- ^ __output__: The forwarded tensor.
                                +refSelect' op'options index
                                +           inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs index,
                                +                                                             buildInputs inputs]
                                +        buildOp [] (opDef "RefSelect"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "N" .~ n
                                +                    & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length inputs) :: Int64
                                +{-
                                +input_arg {
                                +  name: "index"
                                +  description: "A scalar that determines the input that gets selected."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "inputs"
                                +  description: "A list of ref tensors, one of which will be forwarded to `output`."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The forwarded tensor."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr { name: "T" type: "type" }
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | Forwards the ref tensor `data` to the output port determined by `pred`.
                                +--
                                +-- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
                                +-- the data goes to `output_false`.
                                +-- 
                                +-- See also `Switch` and `Merge`.
                                +refSwitch :: forall v'2 t m' . (MonadBuild m', TensorType t) => 
                                +             Tensor Ref t -- ^ __data__: The ref tensor to be forwarded to the appropriate output.
                                +             -> Tensor v'2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data.
                                +             -> m' ((Tensor Ref t, Tensor Ref t))
                                +             -- ^ (__output_false__, __output_true__)
                                +             --
                                +             -- * __output_false__: If `pred` is false, data will be forwarded to this output.
                                +             --
                                +             -- * __output_true__: If `pred` is true, data will be forwarded to this output.
                                +refSwitch = refSwitch' id
                                +refSwitch' :: forall v'2 t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +              Tensor Ref t -- ^ __data__: The ref tensor to be forwarded to the appropriate output.
                                +              -> Tensor v'2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data.
                                +              -> m' ((Tensor Ref t, Tensor Ref t))
                                +              -- ^ (__output_false__, __output_true__)
                                +              --
                                +              -- * __output_false__: If `pred` is false, data will be forwarded to this output.
                                +              --
                                +              -- * __output_true__: If `pred` is true, data will be forwarded to this output.
                                +refSwitch' op'options data' pred | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs pred]
                                +        buildOp [] (opDef "RefSwitch"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "data"
                                +  description: "The ref tensor to be forwarded to the appropriate output."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "pred"
                                +  description: "A scalar that specifies which output port will receive data."
                                +  type: DT_BOOL
                                +}
                                +output_arg {
                                +  name: "output_false"
                                +  description: "If `pred` is false, data will be forwarded to this output."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +output_arg {
                                +  name: "output_true"
                                +  description: "If `pred` is true, data will be forwarded to this output."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes rectified linear: `max(features, 0)`.
                                +
                                +relu :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64,
                                +                                Data.Int.Int8, Data.Word.Word16,
                                +                                Data.Word.Word8, Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __features__
                                +        -> Tensor Build t -- ^ __activations__
                                +relu = relu' id
                                +relu' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64,
                                +                                 Data.Int.Int8, Data.Word.Word16,
                                +                                 Data.Word.Word8, Double, Float] t) =>
                                +         OpParams ->
                                +         Tensor v'1 t -- ^ __features__
                                +         -> Tensor Build t -- ^ __activations__
                                +relu' op'options features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features]
                                +        return (opDef "Relu"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "features" type_attr: "T" }
                                +output_arg { name: "activations" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes rectified linear 6: `min(max(features, 0), 6)`.
                                +
                                +relu6 :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64,
                                +                                 Data.Int.Int8, Data.Word.Word16,
                                +                                 Data.Word.Word8, Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __features__
                                +         -> Tensor Build t -- ^ __activations__
                                +relu6 = relu6' id
                                +relu6' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                  Data.Int.Int64, Data.Int.Int8,
                                +                                  Data.Word.Word16, Data.Word.Word8, Double,
                                +                                  Float] t) => OpParams ->
                                +          Tensor v'1 t -- ^ __features__
                                +          -> Tensor Build t -- ^ __activations__
                                +relu6' op'options features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features]
                                +        return (opDef "Relu6"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "features" type_attr: "T" }
                                +output_arg { name: "activations" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes rectified linear 6 gradients for a Relu6 operation.
                                +
                                +relu6Grad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => 
                                +             Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu6 operation.
                                +             -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding Relu6 operation.
                                +             -> Tensor Build t -- ^ __backprops__: The gradients:
                                +             -- `gradients * (features > 0) * (features < 6)`.
                                +relu6Grad = relu6Grad' id
                                +relu6Grad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu6 operation.
                                +              -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding Relu6 operation.
                                +              -> Tensor Build t -- ^ __backprops__: The gradients:
                                +              -- `gradients * (features > 0) * (features < 6)`.
                                +relu6Grad' op'options gradients features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients,
                                +                                                             buildInputs features]
                                +        return (opDef "Relu6Grad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "gradients"
                                +  description: "The backpropagated gradients to the corresponding Relu6 operation."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "features"
                                +  description: "The features passed as input to the corresponding Relu6 operation."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "backprops"
                                +  description: "The gradients:\n`gradients * (features > 0) * (features < 6)`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes rectified linear gradients for a Relu operation.
                                +
                                +reluGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu operation.
                                +            -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding Relu operation, OR
                                +                            -- the outputs of that operation (both work equivalently).
                                +            -> Tensor Build t -- ^ __backprops__: `gradients * (features > 0)`.
                                +reluGrad = reluGrad' id
                                +reluGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu operation.
                                +             -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding Relu operation, OR
                                +                             -- the outputs of that operation (both work equivalently).
                                +             -> Tensor Build t -- ^ __backprops__: `gradients * (features > 0)`.
                                +reluGrad' op'options gradients features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients,
                                +                                                             buildInputs features]
                                +        return (opDef "ReluGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "gradients"
                                +  description: "The backpropagated gradients to the corresponding Relu operation."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "features"
                                +  description: "The features passed as input to the corresponding Relu operation, OR\nthe outputs of that operation (both work equivalently)."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "backprops"
                                +  description: "`gradients * (features > 0)`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Execute a sub graph on a remote processor.
                                +--
                                +-- The graph specifications(such as graph itself, input tensors and output names)
                                +-- are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo
                                +-- as serialized_remote_fused_graph_execute_info.
                                +-- The specifications will be passed to a dedicated registered
                                +-- remote fused graph executor.  The executor will send the graph specifications
                                +-- to a remote processor and execute that graph.  The execution results
                                +-- will be passed to consumer nodes as outputs of this node.
                                +remoteFusedGraphExecute :: forall v'1 tinputs toutputs . (TensorTypes tinputs,
                                +                                                          TensorTypes toutputs) =>
                                +                           
                                +                           TensorList (v'1) tinputs -- ^ __inputs__: Arbitrary number of tensors with arbitrary data types
                                +                           -> TensorList (Build) toutputs -- ^ __outputs__: Arbitrary number of tensors with arbitrary data types
                                +remoteFusedGraphExecute = remoteFusedGraphExecute' id
                                +remoteFusedGraphExecute' :: forall v'1 tinputs toutputs . (TensorTypes tinputs,
                                +                                                           TensorTypes toutputs) =>
                                +                            OpParams ->
                                +                            TensorList (v'1) tinputs -- ^ __inputs__: Arbitrary number of tensors with arbitrary data types
                                +                            -> TensorList (Build) toutputs -- ^ __outputs__: Arbitrary number of tensors with arbitrary data types
                                +remoteFusedGraphExecute' op'options inputs | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
                                +        return (opDef "RemoteFusedGraphExecute"
                                +                & opAttr "Tinputs" .~ fromTensorTypes (Proxy :: Proxy tinputs)
                                +                & opAttr "Toutputs" .~ fromTensorTypes (Proxy :: Proxy toutputs)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "Arbitrary number of tensors with arbitrary data types"
                                +  type_list_attr: "Tinputs"
                                +}
                                +output_arg {
                                +  name: "outputs"
                                +  description: "Arbitrary number of tensors with arbitrary data types"
                                +  type_list_attr: "Toutputs"
                                +}
                                +attr { name: "Tinputs" type: "list(type)" has_minimum: true }
                                +attr { name: "Toutputs" type: "list(type)" has_minimum: true }
                                +attr {
                                +  name: "serialized_remote_fused_graph_execute_info"
                                +  type: "string"
                                +  description: "Serialized protocol buffer\nof RemoteFusedGraphExecuteInfo which contains graph specifications."
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that emits the outputs of `input_dataset` `count` times.
                                +
                                +repeatDataset :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                 [DataType] -- ^ __output_types__
                                +                 -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                 -> Tensor v'2 Data.Int.Int64 -- ^ __count__: A scalar representing the number of times that `input_dataset` should
                                +                                              -- be repeated. A value of `-1` indicates that it should be repeated infinitely.
                                +                 -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +repeatDataset = repeatDataset' id
                                +repeatDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                  [DataType] -- ^ __output_types__
                                +                  -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                  -> Tensor v'2 Data.Int.Int64 -- ^ __count__: A scalar representing the number of times that `input_dataset` should
                                +                                               -- be repeated. A value of `-1` indicates that it should be repeated infinitely.
                                +                  -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +repeatDataset' op'options output_types input_dataset count | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs count]
                                +        buildOp [] (opDef "RepeatDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +input_arg {
                                +  name: "count"
                                +  description: "A scalar representing the number of times that `input_dataset` should\nbe repeated. A value of `-1` indicates that it should be repeated infinitely."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Given a quantized tensor described by (input, input_min, input_max), outputs a
                                +--
                                +-- range that covers the actual values present in that tensor.  This op is
                                +-- typically used to produce the requested_output_min and requested_output_max for
                                +-- Requantize.
                                +requantizationRange :: forall v'1 v'2 v'3 tinput . (OneOf '[Data.Int.Int16,
                                +                                                            Data.Int.Int32,
                                +                                                            Data.Word.Word16,
                                +                                                            Data.Word.Word8] tinput) =>
                                +                       
                                +                       Tensor v'1 tinput -- ^ __input__
                                +                       -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
                                +                       -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
                                +                       -> (Tensor Build Float, Tensor Build Float)
                                +                       -- ^ (__output_min__, __output_max__)
                                +                       --
                                +                       -- * __output_min__: The computed min output.
                                +                       --
                                +                       -- * __output_max__: the computed max output.
                                +requantizationRange = requantizationRange' id
                                +requantizationRange' :: forall v'1 v'2 v'3 tinput . (OneOf '[Data.Int.Int16,
                                +                                                             Data.Int.Int32,
                                +                                                             Data.Word.Word16,
                                +                                                             Data.Word.Word8] tinput) =>
                                +                        OpParams ->
                                +                        Tensor v'1 tinput -- ^ __input__
                                +                        -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
                                +                        -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
                                +                        -> (Tensor Build Float, Tensor Build Float)
                                +                        -- ^ (__output_min__, __output_max__)
                                +                        --
                                +                        -- * __output_min__: The computed min output.
                                +                        --
                                +                        -- * __output_max__: the computed max output.
                                +requantizationRange' op'options input input_min input_max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs input_min,
                                +                                                             buildInputs input_max]
                                +        return (opDef "RequantizationRange"
                                +                & opAttr "Tinput" .~ tensorType (undefined :: tinput)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "Tinput" }
                                +input_arg {
                                +  name: "input_min"
                                +  description: "The float value that the minimum quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "input_max"
                                +  description: "The float value that the maximum quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output_min"
                                +  description: "The computed min output."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output_max"
                                +  description: "the computed max output."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "Tinput"
                                +  type: "type"
                                +  description: "The type of the input."
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Convert the quantized 'input' tensor into a lower-precision 'output', using the
                                +--
                                +-- output range specified with 'requested_output_min' and 'requested_output_max'.
                                +-- 
                                +-- [input_min, input_max] are scalar floats that specify the range for the float
                                +-- interpretation of the 'input' data. For example, if input_min is -1.0f and
                                +-- input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
                                +-- value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
                                +requantize :: forall v'1 v'2 v'3 v'4 v'5 tinput
                                +              out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                  Data.Word.Word16, Data.Word.Word8] tinput,
                                +                          OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                  Data.Word.Word16,
                                +                                  Data.Word.Word8] out_type) => 
                                +              Tensor v'1 tinput -- ^ __input__
                                +              -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
                                +              -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
                                +              -> Tensor v'4 Float -- ^ __requested_output_min__: The float value that the minimum quantized output value represents.
                                +              -> Tensor v'5 Float -- ^ __requested_output_max__: The float value that the maximum quantized output value represents.
                                +              -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
                                +              -- ^ (__output__, __output_min__, __output_max__)
                                +              --
                                +              -- * __output__
                                +              --
                                +              -- * __output_min__: The requested_output_min value is copied into this output.
                                +              --
                                +              -- * __output_max__: The requested_output_max value is copied into this output.
                                +requantize = requantize' id
                                +requantize' :: forall v'1 v'2 v'3 v'4 v'5 tinput
                                +               out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Word.Word16, Data.Word.Word8] tinput,
                                +                           OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                   Data.Word.Word16,
                                +                                   Data.Word.Word8] out_type) => OpParams ->
                                +               Tensor v'1 tinput -- ^ __input__
                                +               -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
                                +               -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
                                +               -> Tensor v'4 Float -- ^ __requested_output_min__: The float value that the minimum quantized output value represents.
                                +               -> Tensor v'5 Float -- ^ __requested_output_max__: The float value that the maximum quantized output value represents.
                                +               -> (Tensor Build out_type, Tensor Build Float,
                                +                   Tensor Build Float)
                                +               -- ^ (__output__, __output_min__, __output_max__)
                                +               --
                                +               -- * __output__
                                +               --
                                +               -- * __output_min__: The requested_output_min value is copied into this output.
                                +               --
                                +               -- * __output_max__: The requested_output_max value is copied into this output.
                                +requantize' op'options input input_min input_max requested_output_min
                                +            requested_output_max | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs input_min,
                                +                                                             buildInputs input_max,
                                +                                                             buildInputs requested_output_min,
                                +                                                             buildInputs requested_output_max]
                                +        return (opDef "Requantize"
                                +                & opAttr "Tinput" .~ tensorType (undefined :: tinput)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "Tinput" }
                                +input_arg {
                                +  name: "input_min"
                                +  description: "The float value that the minimum quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "input_max"
                                +  description: "The float value that the maximum quantized input value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "requested_output_min"
                                +  description: "The float value that the minimum quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "requested_output_max"
                                +  description: "The float value that the maximum quantized output value represents."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "output" type_attr: "out_type" }
                                +output_arg {
                                +  name: "output_min"
                                +  description: "The requested_output_min value is copied into this output."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "output_max"
                                +  description: "The requested_output_max value is copied into this output."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "Tinput"
                                +  type: "type"
                                +  description: "The type of the input."
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  description: "The type of the output. Should be a lower bit depth than Tinput."
                                +  allowed_values {
                                +    list {
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT16
                                +      type: DT_QUINT16
                                +      type: DT_QINT32
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Reshapes a tensor.
                                +--
                                +-- Given `tensor`, this operation returns a tensor that has the same values
                                +-- as `tensor` with shape `shape`.
                                +-- 
                                +-- If one component of `shape` is the special value -1, the size of that dimension
                                +-- is computed so that the total size remains constant.  In particular, a `shape`
                                +-- of `[-1]` flattens into 1-D.  At most one component of `shape` can be -1.
                                +-- 
                                +-- If `shape` is 1-D or higher, then the operation returns a tensor with shape
                                +-- `shape` filled with the values of `tensor`. In this case, the number of elements
                                +-- implied by `shape` must be the same as the number of elements in `tensor`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
                                +-- # tensor 't' has shape [9]
                                +-- reshape(t, [3, 3]) ==> [[1, 2, 3],
                                +--                         [4, 5, 6],
                                +--                         [7, 8, 9]]
                                +-- 
                                +-- # tensor 't' is [[[1, 1], [2, 2]],
                                +-- #                [[3, 3], [4, 4]]]
                                +-- # tensor 't' has shape [2, 2, 2]
                                +-- reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                                +--                         [3, 3, 4, 4]]
                                +-- 
                                +-- # tensor 't' is [[[1, 1, 1],
                                +-- #                 [2, 2, 2]],
                                +-- #                [[3, 3, 3],
                                +-- #                 [4, 4, 4]],
                                +-- #                [[5, 5, 5],
                                +-- #                 [6, 6, 6]]]
                                +-- # tensor 't' has shape [3, 2, 3]
                                +-- # pass '[-1]' to flatten 't'
                                +-- reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
                                +-- 
                                +-- # -1 can also be used to infer the shape
                                +-- 
                                +-- # -1 is inferred to be 9:
                                +-- reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                                +--                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
                                +-- # -1 is inferred to be 2:
                                +-- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                                +--                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
                                +-- # -1 is inferred to be 3:
                                +-- reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
                                +--                               [2, 2, 2],
                                +--                               [3, 3, 3]],
                                +--                              [[4, 4, 4],
                                +--                               [5, 5, 5],
                                +--                               [6, 6, 6]]]
                                +-- 
                                +-- # tensor 't' is [7]
                                +-- # shape `[]` reshapes to a scalar
                                +-- reshape(t, []) ==> 7
                                +-- ```
                                +reshape :: forall v'1 v'2 t tshape . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] tshape) =>
                                +           
                                +           Tensor v'1 t -- ^ __tensor__
                                +           -> Tensor v'2 tshape -- ^ __shape__: Defines the shape of the output tensor.
                                +           -> Tensor Build t -- ^ __output__
                                +reshape = reshape' id
                                +reshape' :: forall v'1 v'2 t tshape . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] tshape) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __tensor__
                                +            -> Tensor v'2 tshape -- ^ __shape__: Defines the shape of the output tensor.
                                +            -> Tensor Build t -- ^ __output__
                                +reshape' op'options tensor shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor,
                                +                                                             buildInputs shape]
                                +        return (opDef "Reshape"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tshape" .~ tensorType (undefined :: tshape)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "tensor" type_attr: "T" }
                                +input_arg {
                                +  name: "shape"
                                +  description: "Defines the shape of the output tensor."
                                +  type_attr: "Tshape"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tshape"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Resize `images` to `size` using area interpolation.
                                +--
                                +-- Input images can be of different types but output images are always float.
                                +resizeArea :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t) => 
                                +              Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +              -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                           -- new size for the images.
                                +              -> Tensor Build Float -- ^ __resized_images__: 4-D with shape
                                +              -- `[batch, new_height, new_width, channels]`.
                                +resizeArea = resizeArea' id
                                +resizeArea' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16, Data.Word.Word8,
                                +                                           Double, Float] t) => OpParams ->
                                +               Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +               -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                            -- new size for the images.
                                +               -> Tensor Build Float -- ^ __resized_images__: 4-D with shape
                                +               -- `[batch, new_height, new_width, channels]`.
                                +resizeArea' op'options images size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs size]
                                +        return (opDef "ResizeArea"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "resized_images"
                                +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "align_corners"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
                                +}
                                +-}
                                +
                                +-- | Resize `images` to `size` using bicubic interpolation.
                                +--
                                +-- Input images can be of different types but output images are always float.
                                +resizeBicubic :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t) => 
                                +                 Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +                 -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                              -- new size for the images.
                                +                 -> Tensor Build Float -- ^ __resized_images__: 4-D with shape
                                +                 -- `[batch, new_height, new_width, channels]`.
                                +resizeBicubic = resizeBicubic' id
                                +resizeBicubic' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                              Data.Int.Int64, Data.Int.Int8,
                                +                                              Data.Word.Word16, Data.Word.Word8,
                                +                                              Double, Float] t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +                  -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                               -- new size for the images.
                                +                  -> Tensor Build Float -- ^ __resized_images__: 4-D with shape
                                +                  -- `[batch, new_height, new_width, channels]`.
                                +resizeBicubic' op'options images size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs size]
                                +        return (opDef "ResizeBicubic"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "resized_images"
                                +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "align_corners"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
                                +}
                                +-}
                                +
                                +-- | Resize `images` to `size` using bilinear interpolation.
                                +--
                                +-- Input images can be of different types but output images are always float.
                                +resizeBilinear :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                              Data.Int.Int64, Data.Int.Int8,
                                +                                              Data.Word.Word16, Data.Word.Word8,
                                +                                              Double, Float] t) => 
                                +                  Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +                  -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                               -- new size for the images.
                                +                  -> Tensor Build Float -- ^ __resized_images__: 4-D with shape
                                +                  -- `[batch, new_height, new_width, channels]`.
                                +resizeBilinear = resizeBilinear' id
                                +resizeBilinear' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Int.Int64, Data.Int.Int8,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8, Double,
                                +                                               Float] t) => OpParams ->
                                +                   Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                                -- new size for the images.
                                +                   -> Tensor Build Float -- ^ __resized_images__: 4-D with shape
                                +                   -- `[batch, new_height, new_width, channels]`.
                                +resizeBilinear' op'options images size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs size]
                                +        return (opDef "ResizeBilinear"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "resized_images"
                                +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "align_corners"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
                                +}
                                +-}
                                +
                                +-- | Computes the gradient of bilinear interpolation.
                                +
                                +resizeBilinearGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double,
                                +                                                  Float] t) => 
                                +                      Tensor v'1 Float -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.
                                +                      -> Tensor v'2 t -- ^ __original_image__: 4-D with shape `[batch, orig_height, orig_width, channels]`,
                                +                                      -- The image tensor that was resized.
                                +                      -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`.
                                +                      -- Gradients with respect to the input image. Input image must have been
                                +                      -- float or double.
                                +resizeBilinearGrad = resizeBilinearGrad' id
                                +resizeBilinearGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double,
                                +                                                   Float] t) => OpParams ->
                                +                       Tensor v'1 Float -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.
                                +                       -> Tensor v'2 t -- ^ __original_image__: 4-D with shape `[batch, orig_height, orig_width, channels]`,
                                +                                       -- The image tensor that was resized.
                                +                       -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`.
                                +                       -- Gradients with respect to the input image. Input image must have been
                                +                       -- float or double.
                                +resizeBilinearGrad' op'options grads original_image | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads,
                                +                                                             buildInputs original_image]
                                +        return (opDef "ResizeBilinearGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "grads"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "original_image"
                                +  description: "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "align_corners"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension."
                                +}
                                +-}
                                +
                                +-- | Resize `images` to `size` using nearest neighbor interpolation.
                                +
                                +resizeNearestNeighbor :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t) => 
                                +                         Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +                         -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                                      -- new size for the images.
                                +                         -> Tensor Build t -- ^ __resized_images__: 4-D with shape
                                +                         -- `[batch, new_height, new_width, channels]`.
                                +resizeNearestNeighbor = resizeNearestNeighbor' id
                                +resizeNearestNeighbor' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t) => OpParams ->
                                +                          Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
                                +                          -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
                                +                                                       -- new size for the images.
                                +                          -> Tensor Build t -- ^ __resized_images__: 4-D with shape
                                +                          -- `[batch, new_height, new_width, channels]`.
                                +resizeNearestNeighbor' op'options images size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                +                                                             buildInputs size]
                                +        return (opDef "ResizeNearestNeighbor"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "images"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "resized_images"
                                +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "align_corners"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
                                +}
                                +-}
                                +
                                +-- | Computes the gradient of nearest neighbor interpolation.
                                +
                                +resizeNearestNeighborGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int8,
                                +                                                         Data.Word.Word16,
                                +                                                         Data.Word.Word8,
                                +                                                         Double, Float] t) => 
                                +                             Tensor v'1 t -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.
                                +                             -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
                                +                                                          -- original input size.
                                +                             -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
                                +                             -- with respect to the input image.
                                +resizeNearestNeighborGrad = resizeNearestNeighborGrad' id
                                +resizeNearestNeighborGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32,
                                +                                                          Data.Int.Int8,
                                +                                                          Data.Word.Word16,
                                +                                                          Data.Word.Word8,
                                +                                                          Double, Float] t) =>
                                +                              OpParams ->
                                +                              Tensor v'1 t -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.
                                +                              -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
                                +                                                           -- original input size.
                                +                              -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
                                +                              -- with respect to the input image.
                                +resizeNearestNeighborGrad' op'options grads size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads,
                                +                                                             buildInputs size]
                                +        return (opDef "ResizeNearestNeighborGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "grads"
                                +  description: "4-D with shape `[batch, height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "= A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The\noriginal input size."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients\nwith respect to the input image."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT32
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "align_corners"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the adadelta scheme.
                                +--
                                +-- accum = rho() * accum + (1 - rho()) * grad.square();
                                +-- update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
                                +-- update_accum = rho() * update_accum + (1 - rho()) * update.square();
                                +-- var -= update;
                                +resourceApplyAdadelta :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t
                                +                         m' . (MonadBuild m',
                                +                               OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => 
                                +                         Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                         -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                         -> Tensor v'3 ResourceHandle -- ^ __accum_update__: Should be from a Variable().
                                +                         -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                         -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar.
                                +                         -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
                                +                         -> Tensor v'7 t -- ^ __grad__: The gradient.
                                +                         -> m' (ControlNode)
                                +resourceApplyAdadelta = resourceApplyAdadelta' id
                                +resourceApplyAdadelta' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t
                                +                          m' . (MonadBuild m',
                                +                                OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => OpParams ->
                                +                          Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                          -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                          -> Tensor v'3 ResourceHandle -- ^ __accum_update__: Should be from a Variable().
                                +                          -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                          -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar.
                                +                          -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
                                +                          -> Tensor v'7 t -- ^ __grad__: The gradient.
                                +                          -> m' (ControlNode)
                                +resourceApplyAdadelta' op'options var accum accum_update lr rho epsilon
                                +                       grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs accum_update,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ResourceApplyAdadelta"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum_update"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Constant factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the adagrad scheme.
                                +--
                                +-- accum += grad * grad
                                +-- var -= lr * grad * (1 / sqrt(accum))
                                +resourceApplyAdagrad :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                       OneOf '[(Data.Complex.Complex Double),
                                +                                                               (Data.Complex.Complex Float),
                                +                                                               Data.Int.Int16,
                                +                                                               Data.Int.Int32,
                                +                                                               Data.Int.Int64,
                                +                                                               Data.Int.Int8,
                                +                                                               Data.Word.Word16,
                                +                                                               Data.Word.Word8,
                                +                                                               Double,
                                +                                                               Float] t) => 
                                +                        Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                        -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                        -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                        -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                        -> m' (ControlNode)
                                +resourceApplyAdagrad = resourceApplyAdagrad' id
                                +resourceApplyAdagrad' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                        OneOf '[(Data.Complex.Complex Double),
                                +                                                                (Data.Complex.Complex Float),
                                +                                                                Data.Int.Int16,
                                +                                                                Data.Int.Int32,
                                +                                                                Data.Int.Int64,
                                +                                                                Data.Int.Int8,
                                +                                                                Data.Word.Word16,
                                +                                                                Data.Word.Word8,
                                +                                                                Double,
                                +                                                                Float] t) =>
                                +                         OpParams ->
                                +                         Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                         -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                         -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                         -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                         -> m' (ControlNode)
                                +resourceApplyAdagrad' op'options var accum lr grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ResourceApplyAdagrad"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the proximal adagrad scheme.
                                +
                                +resourceApplyAdagradDA :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t
                                +                          m' . (MonadBuild m',
                                +                                OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t) => 
                                +                          Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                          -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__: Should be from a Variable().
                                +                          -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__: Should be from a Variable().
                                +                          -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                          -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                          -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                          -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                          -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
                                +                          -> m' (ControlNode)
                                +resourceApplyAdagradDA = resourceApplyAdagradDA' id
                                +resourceApplyAdagradDA' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t
                                +                           m' . (MonadBuild m',
                                +                                 OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t) => OpParams ->
                                +                           Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                           -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__: Should be from a Variable().
                                +                           -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__: Should be from a Variable().
                                +                           -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                           -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                           -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                           -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                           -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
                                +                           -> m' (ControlNode)
                                +resourceApplyAdagradDA' op'options var gradient_accumulator
                                +                        gradient_squared_accumulator grad lr l1 l2
                                +                        global_step | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs gradient_accumulator,
                                +                                                             buildInputs gradient_squared_accumulator,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs global_step]
                                +        buildOp [] (opDef "ResourceApplyAdagradDA"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "gradient_accumulator"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "gradient_squared_accumulator"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "global_step"
                                +  description: "Training step number. Must be a scalar."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the Adam algorithm.
                                +--
                                +-- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
                                +-- m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
                                +-- v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
                                +-- variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
                                +resourceApplyAdam :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t
                                +                     m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double),
                                +                                                  (Data.Complex.Complex Float),
                                +                                                  Data.Int.Int16,
                                +                                                  Data.Int.Int32,
                                +                                                  Data.Int.Int64, Data.Int.Int8,
                                +                                                  Data.Word.Word16,
                                +                                                  Data.Word.Word8, Double,
                                +                                                  Float] t) => 
                                +                     Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                     -> Tensor v'2 ResourceHandle -- ^ __m__: Should be from a Variable().
                                +                     -> Tensor v'3 ResourceHandle -- ^ __v__: Should be from a Variable().
                                +                     -> Tensor v'4 t -- ^ __beta1_power__: Must be a scalar.
                                +                     -> Tensor v'5 t -- ^ __beta2_power__: Must be a scalar.
                                +                     -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                     -> Tensor v'7 t -- ^ __beta1__: Momentum factor. Must be a scalar.
                                +                     -> Tensor v'8 t -- ^ __beta2__: Momentum factor. Must be a scalar.
                                +                     -> Tensor v'9 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                     -> Tensor v'10 t -- ^ __grad__: The gradient.
                                +                     -> m' (ControlNode)
                                +resourceApplyAdam = resourceApplyAdam' id
                                +resourceApplyAdam' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t
                                +                      m' . (MonadBuild m',
                                +                            OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                      -> Tensor v'2 ResourceHandle -- ^ __m__: Should be from a Variable().
                                +                      -> Tensor v'3 ResourceHandle -- ^ __v__: Should be from a Variable().
                                +                      -> Tensor v'4 t -- ^ __beta1_power__: Must be a scalar.
                                +                      -> Tensor v'5 t -- ^ __beta2_power__: Must be a scalar.
                                +                      -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                      -> Tensor v'7 t -- ^ __beta1__: Momentum factor. Must be a scalar.
                                +                      -> Tensor v'8 t -- ^ __beta2__: Momentum factor. Must be a scalar.
                                +                      -> Tensor v'9 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                      -> Tensor v'10 t -- ^ __grad__: The gradient.
                                +                      -> m' (ControlNode)
                                +resourceApplyAdam' op'options var m v beta1_power beta2_power lr beta1 beta2
                                +                   epsilon grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs m,
                                +                                                             buildInputs v,
                                +                                                             buildInputs beta1_power,
                                +                                                             buildInputs beta2_power,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs beta1,
                                +                                                             buildInputs beta2,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ResourceApplyAdam"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "m"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "v"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "beta1_power" description: "Must be a scalar." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "beta2_power" description: "Must be a scalar." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "beta1"
                                +  description: "Momentum factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "beta2"
                                +  description: "Momentum factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +attr {
                                +  name: "use_nesterov"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, uses the nesterov update."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the centered RMSProp algorithm.
                                +--
                                +-- The centered RMSProp algorithm uses an estimate of the centered second moment
                                +-- (i.e., the variance) for normalization, as opposed to regular RMSProp, which
                                +-- uses the (uncentered) second moment. This often helps with training, but is
                                +-- slightly more expensive in terms of computation and memory.
                                +-- 
                                +-- Note that in dense implementation of this algorithm, mg, ms, and mom will
                                +-- update even if the grad is zero, but in this sparse implementation, mg, ms,
                                +-- and mom will not update in iterations during which the grad is zero.
                                +-- 
                                +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
                                +-- mean_grad = decay * mean_grad + (1-decay) * gradient
                                +-- 
                                +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
                                +-- 
                                +-- mg <- rho * mg_{t-1} + (1-rho) * grad
                                +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
                                +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
                                +-- var <- var - mom
                                +resourceApplyCenteredRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                                m' . (MonadBuild m',
                                +                                      OneOf '[(Data.Complex.Complex Double),
                                +                                              (Data.Complex.Complex Float),
                                +                                              Data.Int.Int16, Data.Int.Int32,
                                +                                              Data.Int.Int64, Data.Int.Int8,
                                +                                              Data.Word.Word16, Data.Word.Word8,
                                +                                              Double, Float] t) => 
                                +                                Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                -> Tensor v'2 ResourceHandle -- ^ __mg__: Should be from a Variable().
                                +                                -> Tensor v'3 ResourceHandle -- ^ __ms__: Should be from a Variable().
                                +                                -> Tensor v'4 ResourceHandle -- ^ __mom__: Should be from a Variable().
                                +                                -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                                -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                                -> Tensor v'7 t -- ^ __momentum__
                                +                                -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                                -> Tensor v'9 t -- ^ __grad__: The gradient.
                                +                                -> m' (ControlNode)
                                +resourceApplyCenteredRMSProp = resourceApplyCenteredRMSProp' id
                                +resourceApplyCenteredRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                                 m' . (MonadBuild m',
                                +                                       OneOf '[(Data.Complex.Complex Double),
                                +                                               (Data.Complex.Complex Float),
                                +                                               Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Int.Int64, Data.Int.Int8,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8, Double,
                                +                                               Float] t) => OpParams ->
                                +                                 Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                 -> Tensor v'2 ResourceHandle -- ^ __mg__: Should be from a Variable().
                                +                                 -> Tensor v'3 ResourceHandle -- ^ __ms__: Should be from a Variable().
                                +                                 -> Tensor v'4 ResourceHandle -- ^ __mom__: Should be from a Variable().
                                +                                 -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                                 -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                                 -> Tensor v'7 t -- ^ __momentum__
                                +                                 -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                                 -> Tensor v'9 t -- ^ __grad__: The gradient.
                                +                                 -> m' (ControlNode)
                                +resourceApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon
                                +                              grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs mg,
                                +                                                             buildInputs ms,
                                +                                                             buildInputs mom,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs momentum,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ResourceApplyCenteredRMSProp"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "mg"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "ms"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "mom"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "momentum" type_attr: "T" }
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the Ftrl-proximal scheme.
                                +--
                                +-- accum_new = accum + grad * grad
                                +-- linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
                                +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
                                +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
                                +-- accum = accum_new
                                +resourceApplyFtrl :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t
                                +                     m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double),
                                +                                                  (Data.Complex.Complex Float),
                                +                                                  Data.Int.Int16,
                                +                                                  Data.Int.Int32,
                                +                                                  Data.Int.Int64, Data.Int.Int8,
                                +                                                  Data.Word.Word16,
                                +                                                  Data.Word.Word8, Double,
                                +                                                  Float] t) => 
                                +                     Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                     -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                     -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable().
                                +                     -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                     -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                     -> Tensor v'6 t -- ^ __l1__: L1 regulariation. Must be a scalar.
                                +                     -> Tensor v'7 t -- ^ __l2__: L2 regulariation. Must be a scalar.
                                +                     -> Tensor v'8 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                     -> m' (ControlNode)
                                +resourceApplyFtrl = resourceApplyFtrl' id
                                +resourceApplyFtrl' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t
                                +                      m' . (MonadBuild m',
                                +                            OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                      -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                      -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable().
                                +                      -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                      -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                      -> Tensor v'6 t -- ^ __l1__: L1 regulariation. Must be a scalar.
                                +                      -> Tensor v'7 t -- ^ __l2__: L2 regulariation. Must be a scalar.
                                +                      -> Tensor v'8 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                      -> m' (ControlNode)
                                +resourceApplyFtrl' op'options var accum linear grad lr l1 l2
                                +                   lr_power | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs linear,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs lr_power]
                                +        buildOp [] (opDef "ResourceApplyFtrl"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "linear"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr_power"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the Ftrl-proximal scheme.
                                +--
                                +-- grad_with_shrinkage = grad + 2 * l2_shrinkage * var
                                +-- accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
                                +-- linear += grad_with_shrinkage +
                                +--     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
                                +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
                                +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
                                +-- accum = accum_new
                                +resourceApplyFtrlV2 :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                       m' . (MonadBuild m',
                                +                             OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                       -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                       -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable().
                                +                       -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                       -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                       -> Tensor v'6 t -- ^ __l1__: L1 regulariation. Must be a scalar.
                                +                       -> Tensor v'7 t -- ^ __l2__: L2 shrinkage regulariation. Must be a scalar.
                                +                       -> Tensor v'8 t -- ^ __l2_shrinkage__
                                +                       -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                       -> m' (ControlNode)
                                +resourceApplyFtrlV2 = resourceApplyFtrlV2' id
                                +resourceApplyFtrlV2' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                        m' . (MonadBuild m',
                                +                              OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float),
                                +                                      Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Int.Int64, Data.Int.Int8,
                                +                                      Data.Word.Word16, Data.Word.Word8, Double,
                                +                                      Float] t) => OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                        -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                        -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable().
                                +                        -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                        -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                        -> Tensor v'6 t -- ^ __l1__: L1 regulariation. Must be a scalar.
                                +                        -> Tensor v'7 t -- ^ __l2__: L2 shrinkage regulariation. Must be a scalar.
                                +                        -> Tensor v'8 t -- ^ __l2_shrinkage__
                                +                        -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                        -> m' (ControlNode)
                                +resourceApplyFtrlV2' op'options var accum linear grad lr l1 l2 l2_shrinkage
                                +                     lr_power | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs linear,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs l2_shrinkage,
                                +                                                             buildInputs lr_power]
                                +        buildOp [] (opDef "ResourceApplyFtrlV2"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "linear"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 shrinkage regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "l2_shrinkage" type_attr: "T" }
                                +input_arg {
                                +  name: "lr_power"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' by subtracting 'alpha' * 'delta' from it.
                                +
                                +resourceApplyGradientDescent :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                +                                                           OneOf '[(Data.Complex.Complex Double),
                                +                                                                   (Data.Complex.Complex Float),
                                +                                                                   Data.Int.Int16,
                                +                                                                   Data.Int.Int32,
                                +                                                                   Data.Int.Int64,
                                +                                                                   Data.Int.Int8,
                                +                                                                   Data.Word.Word16,
                                +                                                                   Data.Word.Word8,
                                +                                                                   Double,
                                +                                                                   Float] t) => 
                                +                                Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                -> Tensor v'3 t -- ^ __delta__: The change.
                                +                                -> m' (ControlNode)
                                +resourceApplyGradientDescent = resourceApplyGradientDescent' id
                                +resourceApplyGradientDescent' :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                +                                                            OneOf '[(Data.Complex.Complex Double),
                                +                                                                    (Data.Complex.Complex Float),
                                +                                                                    Data.Int.Int16,
                                +                                                                    Data.Int.Int32,
                                +                                                                    Data.Int.Int64,
                                +                                                                    Data.Int.Int8,
                                +                                                                    Data.Word.Word16,
                                +                                                                    Data.Word.Word8,
                                +                                                                    Double,
                                +                                                                    Float] t) =>
                                +                                 OpParams ->
                                +                                 Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                 -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                 -> Tensor v'3 t -- ^ __delta__: The change.
                                +                                 -> m' (ControlNode)
                                +resourceApplyGradientDescent' op'options var alpha delta | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs alpha,
                                +                                                             buildInputs delta]
                                +        buildOp [] (opDef "ResourceApplyGradientDescent"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "alpha"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "delta" description: "The change." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the momentum scheme. Set use_nesterov = True if you
                                +--
                                +-- want to use Nesterov momentum.
                                +-- 
                                +-- accum = accum * momentum + grad
                                +-- var -= lr * accum
                                +resourceApplyMomentum :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m',
                                +                                                            OneOf '[(Data.Complex.Complex Double),
                                +                                                                    (Data.Complex.Complex Float),
                                +                                                                    Data.Int.Int16,
                                +                                                                    Data.Int.Int32,
                                +                                                                    Data.Int.Int64,
                                +                                                                    Data.Int.Int8,
                                +                                                                    Data.Word.Word16,
                                +                                                                    Data.Word.Word8,
                                +                                                                    Double,
                                +                                                                    Float] t) =>
                                +                         
                                +                         Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                         -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                         -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                         -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                         -> Tensor v'5 t -- ^ __momentum__: Momentum. Must be a scalar.
                                +                         -> m' (ControlNode)
                                +resourceApplyMomentum = resourceApplyMomentum' id
                                +resourceApplyMomentum' :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m',
                                +                                                             OneOf '[(Data.Complex.Complex Double),
                                +                                                                     (Data.Complex.Complex Float),
                                +                                                                     Data.Int.Int16,
                                +                                                                     Data.Int.Int32,
                                +                                                                     Data.Int.Int64,
                                +                                                                     Data.Int.Int8,
                                +                                                                     Data.Word.Word16,
                                +                                                                     Data.Word.Word8,
                                +                                                                     Double,
                                +                                                                     Float] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                          -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                          -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                          -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                          -> Tensor v'5 t -- ^ __momentum__: Momentum. Must be a scalar.
                                +                          -> m' (ControlNode)
                                +resourceApplyMomentum' op'options var accum lr grad
                                +                       momentum | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs momentum]
                                +        buildOp [] (opDef "ResourceApplyMomentum"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "momentum"
                                +  description: "Momentum. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +attr {
                                +  name: "use_nesterov"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
                                +}
                                +-}
                                +
                                +-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
                                +--
                                +-- accum += grad * grad
                                +-- prox_v = var - lr * grad * (1 / sqrt(accum))
                                +-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
                                +resourceApplyProximalAdagrad :: forall v'1 v'2 v'3 v'4 v'5 v'6 t
                                +                                m' . (MonadBuild m',
                                +                                      OneOf '[(Data.Complex.Complex Double),
                                +                                              (Data.Complex.Complex Float),
                                +                                              Data.Int.Int16, Data.Int.Int32,
                                +                                              Data.Int.Int64, Data.Int.Int8,
                                +                                              Data.Word.Word16, Data.Word.Word8,
                                +                                              Double, Float] t) => 
                                +                                Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                                -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                                -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                -> Tensor v'6 t -- ^ __grad__: The gradient.
                                +                                -> m' (ControlNode)
                                +resourceApplyProximalAdagrad = resourceApplyProximalAdagrad' id
                                +resourceApplyProximalAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t
                                +                                 m' . (MonadBuild m',
                                +                                       OneOf '[(Data.Complex.Complex Double),
                                +                                               (Data.Complex.Complex Float),
                                +                                               Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Int.Int64, Data.Int.Int8,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8, Double,
                                +                                               Float] t) => OpParams ->
                                +                                 Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                 -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                                 -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                                 -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                 -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                 -> Tensor v'6 t -- ^ __grad__: The gradient.
                                +                                 -> m' (ControlNode)
                                +resourceApplyProximalAdagrad' op'options var accum lr l1 l2
                                +                              grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ResourceApplyProximalAdagrad"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' as FOBOS algorithm with fixed learning rate.
                                +--
                                +-- prox_v = var - alpha * delta
                                +-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
                                +resourceApplyProximalGradientDescent :: forall v'1 v'2 v'3 v'4 v'5 t
                                +                                        m' . (MonadBuild m',
                                +                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                      (Data.Complex.Complex Float),
                                +                                                      Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t) => 
                                +                                        Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                        -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                        -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                        -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                        -> Tensor v'5 t -- ^ __delta__: The change.
                                +                                        -> m' (ControlNode)
                                +resourceApplyProximalGradientDescent = resourceApplyProximalGradientDescent' id
                                +resourceApplyProximalGradientDescent' :: forall v'1 v'2 v'3 v'4 v'5 t
                                +                                         m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t) => OpParams ->
                                +                                         Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                         -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                         -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                         -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                         -> Tensor v'5 t -- ^ __delta__: The change.
                                +                                         -> m' (ControlNode)
                                +resourceApplyProximalGradientDescent' op'options var alpha l1 l2
                                +                                      delta | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs alpha,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs delta]
                                +        buildOp [] (opDef "ResourceApplyProximalGradientDescent"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "alpha"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "delta" description: "The change." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the RMSProp algorithm.
                                +--
                                +-- Note that in dense implementation of this algorithm, ms and mom will
                                +-- update even if the grad is zero, but in this sparse implementation, ms
                                +-- and mom will not update in iterations during which the grad is zero.
                                +-- 
                                +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
                                +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
                                +-- 
                                +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
                                +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
                                +-- var <- var - mom
                                +resourceApplyRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t
                                +                        m' . (MonadBuild m',
                                +                              OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float),
                                +                                      Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Int.Int64, Data.Int.Int8,
                                +                                      Data.Word.Word16, Data.Word.Word8, Double,
                                +                                      Float] t) => 
                                +                        Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                        -> Tensor v'2 ResourceHandle -- ^ __ms__: Should be from a Variable().
                                +                        -> Tensor v'3 ResourceHandle -- ^ __mom__: Should be from a Variable().
                                +                        -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                        -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                        -> Tensor v'6 t -- ^ __momentum__
                                +                        -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                        -> Tensor v'8 t -- ^ __grad__: The gradient.
                                +                        -> m' (ControlNode)
                                +resourceApplyRMSProp = resourceApplyRMSProp' id
                                +resourceApplyRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t
                                +                         m' . (MonadBuild m',
                                +                               OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => OpParams ->
                                +                         Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                         -> Tensor v'2 ResourceHandle -- ^ __ms__: Should be from a Variable().
                                +                         -> Tensor v'3 ResourceHandle -- ^ __mom__: Should be from a Variable().
                                +                         -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                         -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                         -> Tensor v'6 t -- ^ __momentum__
                                +                         -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                         -> Tensor v'8 t -- ^ __grad__: The gradient.
                                +                         -> m' (ControlNode)
                                +resourceApplyRMSProp' op'options var ms mom lr rho momentum epsilon
                                +                      grad | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs ms,
                                +                                                             buildInputs mom,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs momentum,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad]
                                +        buildOp [] (opDef "ResourceApplyRMSProp"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "ms"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "mom"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "momentum" type_attr: "T" }
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Gather slices from the variable pointed to by `resource` according to `indices`.
                                +--
                                +-- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
                                +-- Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
                                +-- 
                                +-- ```python
                                +--     # Scalar indices
                                +--     output[:, ..., :] = params[indices, :, ... :]
                                +-- 
                                +--     # Vector indices
                                +--     output[i, :, ..., :] = params[indices[i], :, ... :]
                                +-- 
                                +--     # Higher rank indices
                                +--     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
                                +-- ```
                                +resourceGather :: forall v'1 v'2 dtype tindices m' . (MonadBuild m',
                                +                                                      TensorType dtype,
                                +                                                      OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] tindices) =>
                                +                  
                                +                  Tensor v'1 ResourceHandle -- ^ __resource__
                                +                  -> Tensor v'2 tindices -- ^ __indices__
                                +                  -> m' (Tensor Value dtype) -- ^ __output__
                                +resourceGather = resourceGather' id
                                +resourceGather' :: forall v'1 v'2 dtype tindices m' . (MonadBuild m',
                                +                                                       TensorType dtype,
                                +                                                       OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64] tindices) =>
                                +                   OpParams ->
                                +                   Tensor v'1 ResourceHandle -- ^ __resource__
                                +                   -> Tensor v'2 tindices -- ^ __indices__
                                +                   -> m' (Tensor Value dtype) -- ^ __output__
                                +resourceGather' op'options resource indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "ResourceGather"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "resource" type: DT_RESOURCE }
                                +input_arg { name: "indices" type_attr: "Tindices" }
                                +output_arg { name: "output" type_attr: "dtype" }
                                +attr {
                                +  name: "validate_indices" type: "bool" default_value { b: true }
                                +}
                                +attr { name: "dtype" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Adds sparse updates to the variable referenced by `resource`.
                                +--
                                +-- This operation computes
                                +-- 
                                +--     # Scalar indices
                                +--     ref[indices, ...] += updates[...]
                                +-- 
                                +--     # Vector indices (for each i)
                                +--     ref[indices[i], ...] += updates[i, ...]
                                +-- 
                                +--     # High rank indices (for each i, ..., j)
                                +--     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
                                +-- 
                                +-- Duplicate entries are handled correctly: if multiple `indices` reference
                                +-- the same location, their contributions add.
                                +-- 
                                +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
                                +-- </div>
                                +resourceScatterAdd :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m',
                                +                                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                                      (Data.Complex.Complex Float),
                                +                                                                      Data.Int.Int16,
                                +                                                                      Data.Int.Int32,
                                +                                                                      Data.Int.Int64,
                                +                                                                      Data.Int.Int8,
                                +                                                                      Data.Word.Word16,
                                +                                                                      Data.Word.Word8,
                                +                                                                      Double,
                                +                                                                      Float] dtype,
                                +                                                              OneOf '[Data.Int.Int32,
                                +                                                                      Data.Int.Int64] tindices) =>
                                +                      
                                +                      Tensor v'1 ResourceHandle -- ^ __resource__: Should be from a `Variable` node.
                                +                      -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +                      -> Tensor v'3 dtype -- ^ __updates__: A tensor of updated values to add to `ref`.
                                +                      -> m' (ControlNode)
                                +resourceScatterAdd = resourceScatterAdd' id
                                +resourceScatterAdd' :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m',
                                +                                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                                       (Data.Complex.Complex Float),
                                +                                                                       Data.Int.Int16,
                                +                                                                       Data.Int.Int32,
                                +                                                                       Data.Int.Int64,
                                +                                                                       Data.Int.Int8,
                                +                                                                       Data.Word.Word16,
                                +                                                                       Data.Word.Word8,
                                +                                                                       Double,
                                +                                                                       Float] dtype,
                                +                                                               OneOf '[Data.Int.Int32,
                                +                                                                       Data.Int.Int64] tindices) =>
                                +                       OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __resource__: Should be from a `Variable` node.
                                +                       -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +                       -> Tensor v'3 dtype -- ^ __updates__: A tensor of updated values to add to `ref`.
                                +                       -> m' (ControlNode)
                                +resourceScatterAdd' op'options resource indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ResourceScatterAdd"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource"
                                +  description: "Should be from a `Variable` node."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A tensor of indices into the first dimension of `ref`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A tensor of updated values to add to `ref`."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | var: Should be from a Variable().
                                +
                                +resourceSparseApplyAdadelta :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t tindices
                                +                               m' . (MonadBuild m',
                                +                                     OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float),
                                +                                             Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t,
                                +                                     OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] tindices) => 
                                +                               Tensor v'1 ResourceHandle -- ^ __var__
                                +                               -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                               -> Tensor v'3 ResourceHandle -- ^ __accum_update__: : Should be from a Variable().
                                +                               -> Tensor v'4 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                               -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar.
                                +                               -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
                                +                               -> Tensor v'7 t -- ^ __grad__: The gradient.
                                +                               -> Tensor v'8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                               -> m' (ControlNode)
                                +resourceSparseApplyAdadelta = resourceSparseApplyAdadelta' id
                                +resourceSparseApplyAdadelta' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t
                                +                                tindices m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +                                OpParams ->
                                +                                Tensor v'1 ResourceHandle -- ^ __var__
                                +                                -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                                -> Tensor v'3 ResourceHandle -- ^ __accum_update__: : Should be from a Variable().
                                +                                -> Tensor v'4 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                                -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar.
                                +                                -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
                                +                                -> Tensor v'7 t -- ^ __grad__: The gradient.
                                +                                -> Tensor v'8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                -> m' (ControlNode)
                                +resourceSparseApplyAdadelta' op'options var accum accum_update lr rho epsilon
                                +                             grad indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs accum_update,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "ResourceSparseApplyAdadelta"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "var" type: DT_RESOURCE }
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum_update"
                                +  description: ": Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Constant factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
                                +--
                                +-- That is for rows we have grad for, we update var and accum as follows:
                                +-- accum += grad * grad
                                +-- var -= lr * grad * (1 / sqrt(accum))
                                +resourceSparseApplyAdagrad :: forall v'1 v'2 v'3 v'4 v'5 t tindices
                                +                              m' . (MonadBuild m',
                                +                                    OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t,
                                +                                    OneOf '[Data.Int.Int32,
                                +                                            Data.Int.Int64] tindices) => 
                                +                              Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                              -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                              -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                              -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                              -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                              -> m' (ControlNode)
                                +resourceSparseApplyAdagrad = resourceSparseApplyAdagrad' id
                                +resourceSparseApplyAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 t tindices
                                +                               m' . (MonadBuild m',
                                +                                     OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float),
                                +                                             Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t,
                                +                                     OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] tindices) =>
                                +                               OpParams ->
                                +                               Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                               -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                               -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                               -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                               -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                               -> m' (ControlNode)
                                +resourceSparseApplyAdagrad' op'options var accum lr grad
                                +                            indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "ResourceSparseApplyAdagrad"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
                                +
                                +resourceSparseApplyAdagradDA :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                                tindices m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +                                
                                +                                Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__: Should be from a Variable().
                                +                                -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__: Should be from a Variable().
                                +                                -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                                -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                -> Tensor v'6 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                                -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
                                +                                -> m' (ControlNode)
                                +resourceSparseApplyAdagradDA = resourceSparseApplyAdagradDA' id
                                +resourceSparseApplyAdagradDA' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                                 tindices m' . (MonadBuild m',
                                +                                                OneOf '[(Data.Complex.Complex Double),
                                +                                                        (Data.Complex.Complex Float),
                                +                                                        Data.Int.Int16,
                                +                                                        Data.Int.Int32,
                                +                                                        Data.Int.Int64,
                                +                                                        Data.Int.Int8,
                                +                                                        Data.Word.Word16,
                                +                                                        Data.Word.Word8, Double,
                                +                                                        Float] t,
                                +                                                OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] tindices) =>
                                +                                 OpParams ->
                                +                                 Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                 -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__: Should be from a Variable().
                                +                                 -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__: Should be from a Variable().
                                +                                 -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                                 -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                 -> Tensor v'6 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                                 -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                 -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                 -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
                                +                                 -> m' (ControlNode)
                                +resourceSparseApplyAdagradDA' op'options var gradient_accumulator
                                +                              gradient_squared_accumulator grad indices lr l1 l2
                                +                              global_step | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs gradient_accumulator,
                                +                                                             buildInputs gradient_squared_accumulator,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs global_step]
                                +        buildOp [] (opDef "ResourceSparseApplyAdagradDA"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "gradient_accumulator"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "gradient_squared_accumulator"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "global_step"
                                +  description: "Training step number. Must be a scalar."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the centered RMSProp algorithm.
                                +--
                                +-- The centered RMSProp algorithm uses an estimate of the centered second moment
                                +-- (i.e., the variance) for normalization, as opposed to regular RMSProp, which
                                +-- uses the (uncentered) second moment. This often helps with training, but is
                                +-- slightly more expensive in terms of computation and memory.
                                +-- 
                                +-- Note that in dense implementation of this algorithm, mg, ms, and mom will
                                +-- update even if the grad is zero, but in this sparse implementation, mg, ms,
                                +-- and mom will not update in iterations during which the grad is zero.
                                +-- 
                                +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
                                +-- mean_grad = decay * mean_grad + (1-decay) * gradient
                                +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
                                +-- 
                                +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
                                +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
                                +-- var <- var - mom
                                +resourceSparseApplyCenteredRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9
                                +                                      v'10 t tindices m' . (MonadBuild m',
                                +                                                            OneOf '[(Data.Complex.Complex Double),
                                +                                                                    (Data.Complex.Complex Float),
                                +                                                                    Data.Int.Int16,
                                +                                                                    Data.Int.Int32,
                                +                                                                    Data.Int.Int64,
                                +                                                                    Data.Int.Int8,
                                +                                                                    Data.Word.Word16,
                                +                                                                    Data.Word.Word8,
                                +                                                                    Double,
                                +                                                                    Float] t,
                                +                                                            OneOf '[Data.Int.Int32,
                                +                                                                    Data.Int.Int64] tindices) =>
                                +                                      
                                +                                      Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                      -> Tensor v'2 ResourceHandle -- ^ __mg__: Should be from a Variable().
                                +                                      -> Tensor v'3 ResourceHandle -- ^ __ms__: Should be from a Variable().
                                +                                      -> Tensor v'4 ResourceHandle -- ^ __mom__: Should be from a Variable().
                                +                                      -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                                      -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                                      -> Tensor v'7 t -- ^ __momentum__
                                +                                      -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                                      -> Tensor v'9 t -- ^ __grad__: The gradient.
                                +                                      -> Tensor v'10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
                                +                                      -> m' (ControlNode)
                                +resourceSparseApplyCenteredRMSProp = resourceSparseApplyCenteredRMSProp' id
                                +resourceSparseApplyCenteredRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8
                                +                                       v'9 v'10 t tindices m' . (MonadBuild m',
                                +                                                                 OneOf '[(Data.Complex.Complex Double),
                                +                                                                         (Data.Complex.Complex Float),
                                +                                                                         Data.Int.Int16,
                                +                                                                         Data.Int.Int32,
                                +                                                                         Data.Int.Int64,
                                +                                                                         Data.Int.Int8,
                                +                                                                         Data.Word.Word16,
                                +                                                                         Data.Word.Word8,
                                +                                                                         Double,
                                +                                                                         Float] t,
                                +                                                                 OneOf '[Data.Int.Int32,
                                +                                                                         Data.Int.Int64] tindices) =>
                                +                                       OpParams ->
                                +                                       Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                       -> Tensor v'2 ResourceHandle -- ^ __mg__: Should be from a Variable().
                                +                                       -> Tensor v'3 ResourceHandle -- ^ __ms__: Should be from a Variable().
                                +                                       -> Tensor v'4 ResourceHandle -- ^ __mom__: Should be from a Variable().
                                +                                       -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                                       -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                                       -> Tensor v'7 t -- ^ __momentum__
                                +                                       -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                                       -> Tensor v'9 t -- ^ __grad__: The gradient.
                                +                                       -> Tensor v'10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
                                +                                       -> m' (ControlNode)
                                +resourceSparseApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum
                                +                                    epsilon grad indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs mg,
                                +                                                             buildInputs ms,
                                +                                                             buildInputs mom,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs momentum,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "ResourceSparseApplyCenteredRMSProp"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "mg"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "ms"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "mom"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "momentum" type_attr: "T" }
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var, ms and mom."
                                +  type_attr: "Tindices"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update relevant entries in '*var' according to the Ftrl-proximal scheme.
                                +--
                                +-- That is for rows we have grad for, we update var, accum and linear as follows:
                                +-- accum_new = accum + grad * grad
                                +-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
                                +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
                                +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
                                +-- accum = accum_new
                                +resourceSparseApplyFtrl :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices
                                +                           m' . (MonadBuild m',
                                +                                 OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t,
                                +                                 OneOf '[Data.Int.Int32,
                                +                                         Data.Int.Int64] tindices) => 
                                +                           Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                           -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                           -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable().
                                +                           -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                           -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                           -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                           -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                           -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                           -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                           -> m' (ControlNode)
                                +resourceSparseApplyFtrl = resourceSparseApplyFtrl' id
                                +resourceSparseApplyFtrl' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                            tindices m' . (MonadBuild m',
                                +                                           OneOf '[(Data.Complex.Complex Double),
                                +                                                   (Data.Complex.Complex Float),
                                +                                                   Data.Int.Int16,
                                +                                                   Data.Int.Int32,
                                +                                                   Data.Int.Int64,
                                +                                                   Data.Int.Int8,
                                +                                                   Data.Word.Word16,
                                +                                                   Data.Word.Word8, Double,
                                +                                                   Float] t,
                                +                                           OneOf '[Data.Int.Int32,
                                +                                                   Data.Int.Int64] tindices) =>
                                +                            OpParams ->
                                +                            Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                            -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                            -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable().
                                +                            -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                            -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                            -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                            -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                            -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                            -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                            -> m' (ControlNode)
                                +resourceSparseApplyFtrl' op'options var accum linear grad indices lr l1 l2
                                +                         lr_power | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs linear,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs lr_power]
                                +        buildOp [] (opDef "ResourceSparseApplyFtrl"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "linear"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr_power"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update relevant entries in '*var' according to the Ftrl-proximal scheme.
                                +--
                                +-- That is for rows we have grad for, we update var, accum and linear as follows:
                                +-- grad_with_shrinkage = grad + 2 * l2_shrinkage * var
                                +-- accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
                                +-- linear += grad_with_shrinkage +
                                +--     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
                                +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
                                +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
                                +-- accum = accum_new
                                +resourceSparseApplyFtrlV2 :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t
                                +                             tindices m' . (MonadBuild m',
                                +                                            OneOf '[(Data.Complex.Complex Double),
                                +                                                    (Data.Complex.Complex Float),
                                +                                                    Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Int.Int64,
                                +                                                    Data.Int.Int8,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8, Double,
                                +                                                    Float] t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tindices) =>
                                +                             
                                +                             Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                             -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                             -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable().
                                +                             -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                             -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                             -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                             -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                             -> Tensor v'8 t -- ^ __l2__: L2 shrinkage regulariation. Must be a scalar.
                                +                             -> Tensor v'9 t -- ^ __l2_shrinkage__
                                +                             -> Tensor v'10 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                             -> m' (ControlNode)
                                +resourceSparseApplyFtrlV2 = resourceSparseApplyFtrlV2' id
                                +resourceSparseApplyFtrlV2' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t
                                +                              tindices m' . (MonadBuild m',
                                +                                             OneOf '[(Data.Complex.Complex Double),
                                +                                                     (Data.Complex.Complex Float),
                                +                                                     Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t,
                                +                                             OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] tindices) =>
                                +                              OpParams ->
                                +                              Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                              -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                              -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable().
                                +                              -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                              -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                              -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                              -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                              -> Tensor v'8 t -- ^ __l2__: L2 shrinkage regulariation. Must be a scalar.
                                +                              -> Tensor v'9 t -- ^ __l2_shrinkage__
                                +                              -> Tensor v'10 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                              -> m' (ControlNode)
                                +resourceSparseApplyFtrlV2' op'options var accum linear grad indices lr l1 l2
                                +                           l2_shrinkage lr_power | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs linear,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs l2_shrinkage,
                                +                                                             buildInputs lr_power]
                                +        buildOp [] (opDef "ResourceSparseApplyFtrlV2"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "linear"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 shrinkage regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "l2_shrinkage" type_attr: "T" }
                                +input_arg {
                                +  name: "lr_power"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update relevant entries in '*var' and '*accum' according to the momentum scheme.
                                +--
                                +-- Set use_nesterov = True if you want to use Nesterov momentum.
                                +-- 
                                +-- That is for rows we have grad for, we update var and accum as follows:
                                +-- 
                                +-- accum = accum * momentum + grad
                                +-- var -= lr * accum
                                +resourceSparseApplyMomentum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices
                                +                               m' . (MonadBuild m',
                                +                                     OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float),
                                +                                             Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t,
                                +                                     OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] tindices) => 
                                +                               Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                               -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                               -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                               -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                               -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                               -> Tensor v'6 t -- ^ __momentum__: Momentum. Must be a scalar.
                                +                               -> m' (ControlNode)
                                +resourceSparseApplyMomentum = resourceSparseApplyMomentum' id
                                +resourceSparseApplyMomentum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices
                                +                                m' . (MonadBuild m',
                                +                                      OneOf '[(Data.Complex.Complex Double),
                                +                                              (Data.Complex.Complex Float),
                                +                                              Data.Int.Int16, Data.Int.Int32,
                                +                                              Data.Int.Int64, Data.Int.Int8,
                                +                                              Data.Word.Word16, Data.Word.Word8,
                                +                                              Double, Float] t,
                                +                                      OneOf '[Data.Int.Int32,
                                +                                              Data.Int.Int64] tindices) =>
                                +                                OpParams ->
                                +                                Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                                -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                                -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                                -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                -> Tensor v'6 t -- ^ __momentum__: Momentum. Must be a scalar.
                                +                                -> m' (ControlNode)
                                +resourceSparseApplyMomentum' op'options var accum lr grad indices
                                +                             momentum | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs momentum]
                                +        buildOp [] (opDef "ResourceSparseApplyMomentum"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "momentum"
                                +  description: "Momentum. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +attr {
                                +  name: "use_nesterov"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
                                +}
                                +-}
                                +
                                +-- | Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
                                +--
                                +-- That is for rows we have grad for, we update var and accum as follows:
                                +-- accum += grad * grad
                                +-- prox_v = var
                                +-- prox_v -= lr * grad * (1 / sqrt(accum))
                                +-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
                                +resourceSparseApplyProximalAdagrad :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t
                                +                                      tindices m' . (MonadBuild m',
                                +                                                     OneOf '[(Data.Complex.Complex Double),
                                +                                                             (Data.Complex.Complex Float),
                                +                                                             Data.Int.Int16,
                                +                                                             Data.Int.Int32,
                                +                                                             Data.Int.Int64,
                                +                                                             Data.Int.Int8,
                                +                                                             Data.Word.Word16,
                                +                                                             Data.Word.Word8,
                                +                                                             Double, Float] t,
                                +                                                     OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] tindices) =>
                                +                                      
                                +                                      Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                      -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                                      -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                                      -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                      -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                      -> Tensor v'6 t -- ^ __grad__: The gradient.
                                +                                      -> Tensor v'7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                      -> m' (ControlNode)
                                +resourceSparseApplyProximalAdagrad = resourceSparseApplyProximalAdagrad' id
                                +resourceSparseApplyProximalAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t
                                +                                       tindices m' . (MonadBuild m',
                                +                                                      OneOf '[(Data.Complex.Complex Double),
                                +                                                              (Data.Complex.Complex Float),
                                +                                                              Data.Int.Int16,
                                +                                                              Data.Int.Int32,
                                +                                                              Data.Int.Int64,
                                +                                                              Data.Int.Int8,
                                +                                                              Data.Word.Word16,
                                +                                                              Data.Word.Word8,
                                +                                                              Double, Float] t,
                                +                                                      OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] tindices) =>
                                +                                       OpParams ->
                                +                                       Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                       -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable().
                                +                                       -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                                       -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                       -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                       -> Tensor v'6 t -- ^ __grad__: The gradient.
                                +                                       -> Tensor v'7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                       -> m' (ControlNode)
                                +resourceSparseApplyProximalAdagrad' op'options var accum lr l1 l2 grad
                                +                                    indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "ResourceSparseApplyProximalAdagrad"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.
                                +--
                                +-- That is for rows we have grad for, we update var as follows:
                                +-- prox_v = var - alpha * grad
                                +-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
                                +resourceSparseApplyProximalGradientDescent :: forall v'1 v'2 v'3 v'4 v'5 v'6 t
                                +                                              tindices m' . (MonadBuild m',
                                +                                                             OneOf '[(Data.Complex.Complex Double),
                                +                                                                     (Data.Complex.Complex Float),
                                +                                                                     Data.Int.Int16,
                                +                                                                     Data.Int.Int32,
                                +                                                                     Data.Int.Int64,
                                +                                                                     Data.Int.Int8,
                                +                                                                     Data.Word.Word16,
                                +                                                                     Data.Word.Word8,
                                +                                                                     Double,
                                +                                                                     Float] t,
                                +                                                             OneOf '[Data.Int.Int32,
                                +                                                                     Data.Int.Int64] tindices) =>
                                +                                              
                                +                                              Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                              -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                              -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                              -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                              -> Tensor v'5 t -- ^ __grad__: The gradient.
                                +                                              -> Tensor v'6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                              -> m' (ControlNode)
                                +resourceSparseApplyProximalGradientDescent = resourceSparseApplyProximalGradientDescent' id
                                +resourceSparseApplyProximalGradientDescent' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t
                                +                                               tindices m' . (MonadBuild m',
                                +                                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                                      (Data.Complex.Complex Float),
                                +                                                                      Data.Int.Int16,
                                +                                                                      Data.Int.Int32,
                                +                                                                      Data.Int.Int64,
                                +                                                                      Data.Int.Int8,
                                +                                                                      Data.Word.Word16,
                                +                                                                      Data.Word.Word8,
                                +                                                                      Double,
                                +                                                                      Float] t,
                                +                                                              OneOf '[Data.Int.Int32,
                                +                                                                      Data.Int.Int64] tindices) =>
                                +                                               OpParams ->
                                +                                               Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                                               -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                               -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                               -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                               -> Tensor v'5 t -- ^ __grad__: The gradient.
                                +                                               -> Tensor v'6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                               -> m' (ControlNode)
                                +resourceSparseApplyProximalGradientDescent' op'options var alpha l1 l2 grad
                                +                                            indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs alpha,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "ResourceSparseApplyProximalGradientDescent"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "alpha"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the RMSProp algorithm.
                                +--
                                +-- Note that in dense implementation of this algorithm, ms and mom will
                                +-- update even if the grad is zero, but in this sparse implementation, ms
                                +-- and mom will not update in iterations during which the grad is zero.
                                +-- 
                                +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
                                +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
                                +-- 
                                +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
                                +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
                                +-- var <- var - mom
                                +resourceSparseApplyRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                              tindices m' . (MonadBuild m',
                                +                                             OneOf '[(Data.Complex.Complex Double),
                                +                                                     (Data.Complex.Complex Float),
                                +                                                     Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t,
                                +                                             OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] tindices) =>
                                +                              
                                +                              Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                              -> Tensor v'2 ResourceHandle -- ^ __ms__: Should be from a Variable().
                                +                              -> Tensor v'3 ResourceHandle -- ^ __mom__: Should be from a Variable().
                                +                              -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                              -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                              -> Tensor v'6 t -- ^ __momentum__
                                +                              -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                              -> Tensor v'8 t -- ^ __grad__: The gradient.
                                +                              -> Tensor v'9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
                                +                              -> m' (ControlNode)
                                +resourceSparseApplyRMSProp = resourceSparseApplyRMSProp' id
                                +resourceSparseApplyRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t
                                +                               tindices m' . (MonadBuild m',
                                +                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                      (Data.Complex.Complex Float),
                                +                                                      Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t,
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +                               OpParams ->
                                +                               Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable().
                                +                               -> Tensor v'2 ResourceHandle -- ^ __ms__: Should be from a Variable().
                                +                               -> Tensor v'3 ResourceHandle -- ^ __mom__: Should be from a Variable().
                                +                               -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                               -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                               -> Tensor v'6 t -- ^ __momentum__
                                +                               -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                               -> Tensor v'8 t -- ^ __grad__: The gradient.
                                +                               -> Tensor v'9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
                                +                               -> m' (ControlNode)
                                +resourceSparseApplyRMSProp' op'options var ms mom lr rho momentum epsilon grad
                                +                            indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs ms,
                                +                                                             buildInputs mom,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs momentum,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "ResourceSparseApplyRMSProp"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "ms"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "mom"
                                +  description: "Should be from a Variable()."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "momentum" type_attr: "T" }
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var, ms and mom."
                                +  type_attr: "Tindices"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Assign `value` to the sliced l-value reference of `ref`.
                                +--
                                +-- The values of `value` are assigned to the positions in the variable
                                +-- `ref` that are selected by the slice parameters. The slice parameters
                                +-- `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`.
                                +-- 
                                +-- NOTE this op currently does not support broadcasting and so `value`'s
                                +-- shape must be exactly the shape produced by the slice of `ref`.
                                +resourceStridedSliceAssign :: forall v'1 v'2 v'3 v'4 v'5 t index
                                +                              m' . (MonadBuild m', TensorType t,
                                +                                    OneOf '[Data.Int.Int32,
                                +                                            Data.Int.Int64] index) => 
                                +                              Tensor v'1 ResourceHandle -- ^ __ref__
                                +                              -> Tensor v'2 index -- ^ __begin__
                                +                              -> Tensor v'3 index -- ^ __end__
                                +                              -> Tensor v'4 index -- ^ __strides__
                                +                              -> Tensor v'5 t -- ^ __value__
                                +                              -> m' (ControlNode)
                                +resourceStridedSliceAssign = resourceStridedSliceAssign' id
                                +resourceStridedSliceAssign' :: forall v'1 v'2 v'3 v'4 v'5 t index
                                +                               m' . (MonadBuild m', TensorType t,
                                +                                     OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] index) =>
                                +                               OpParams ->
                                +                               Tensor v'1 ResourceHandle -- ^ __ref__
                                +                               -> Tensor v'2 index -- ^ __begin__
                                +                               -> Tensor v'3 index -- ^ __end__
                                +                               -> Tensor v'4 index -- ^ __strides__
                                +                               -> Tensor v'5 t -- ^ __value__
                                +                               -> m' (ControlNode)
                                +resourceStridedSliceAssign' op'options ref begin end strides
                                +                            value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs begin,
                                +                                                             buildInputs end,
                                +                                                             buildInputs strides,
                                +                                                             buildInputs value]
                                +        buildOp [] (opDef "ResourceStridedSliceAssign"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Index" .~ tensorType (undefined :: index)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "ref" type: DT_RESOURCE }
                                +input_arg { name: "begin" type_attr: "Index" }
                                +input_arg { name: "end" type_attr: "Index" }
                                +input_arg { name: "strides" type_attr: "Index" }
                                +input_arg { name: "value" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Index"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr { name: "begin_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "end_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "new_axis_mask" type: "int" default_value { i: 0 } }
                                +attr {
                                +  name: "shrink_axis_mask" type: "int" default_value { i: 0 }
                                +}
                                +-}
                                +
                                +-- | Restores a tensor from checkpoint files.
                                +--
                                +-- Reads a tensor stored in one or several files. If there are several files (for
                                +-- instance because a tensor was saved as slices), `file_pattern` may contain
                                +-- wildcard symbols (`*` and `?`) in the filename portion only, not in the
                                +-- directory portion.
                                +-- 
                                +-- If a `file_pattern` matches several files, `preferred_shard` can be used to hint
                                +-- in which file the requested tensor is likely to be found. This op will first
                                +-- open the file at index `preferred_shard` in the list of matching files and try
                                +-- to restore tensors from that file.  Only if some tensors or tensor slices are
                                +-- not found in that first file, then the Op opens all the files. Setting
                                +-- `preferred_shard` to match the value passed as the `shard` input
                                +-- of a matching `Save` Op may speed up Restore.  This attribute only affects
                                +-- performance, not correctness.  The default value -1 means files are processed in
                                +-- order.
                                +-- 
                                +-- See also `RestoreSlice`.
                                +restore :: forall v'1 v'2 dt m' . (MonadBuild m', TensorType dt) => 
                                +           Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from
                                +                                                 -- which we read the tensor.
                                +           -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be
                                +                                                    -- restored.
                                +           -> m' (Tensor Value dt) -- ^ __tensor__: The restored tensor.
                                +restore = restore' id
                                +restore' :: forall v'1 v'2 dt m' . (MonadBuild m', TensorType dt) => OpParams ->
                                +            Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from
                                +                                                  -- which we read the tensor.
                                +            -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be
                                +                                                     -- restored.
                                +            -> m' (Tensor Value dt) -- ^ __tensor__: The restored tensor.
                                +restore' op'options file_pattern tensor_name | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs file_pattern,
                                +                                                             buildInputs tensor_name]
                                +        buildOp [] (opDef "Restore"
                                +                    & opAttr "dt" .~ tensorType (undefined :: dt)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "file_pattern"
                                +  description: "Must have a single element. The pattern of the files from\nwhich we read the tensor."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor_name"
                                +  description: "Must have a single element. The name of the tensor to be\nrestored."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "tensor" description: "The restored tensor." type_attr: "dt"
                                +}
                                +attr {
                                +  name: "dt"
                                +  type: "type"
                                +  description: "The type of the tensor to be restored."
                                +}
                                +attr {
                                +  name: "preferred_shard"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Index of file to open first if multiple files match\n`file_pattern`."
                                +}
                                +-}
                                +
                                +-- | Restores a tensor from checkpoint files.
                                +--
                                +-- This is like `Restore` except that restored tensor can be listed as filling
                                +-- only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
                                +-- larger tensor and the slice that the restored tensor covers.
                                +-- 
                                +-- The `shape_and_slice` input has the same format as the
                                +-- elements of the `shapes_and_slices` input of the `SaveSlices` op.
                                +restoreSlice :: forall v'1 v'2 v'3 dt m' . (MonadBuild m', TensorType dt) => 
                                +                Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from
                                +                                                      -- which we read the tensor.
                                +                -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be
                                +                                                         -- restored.
                                +                -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slice__: Scalar. The shapes and slice specifications to use when
                                +                                                         -- restoring a tensors.
                                +                -> m' (Tensor Value dt) -- ^ __tensor__: The restored tensor.
                                +restoreSlice = restoreSlice' id
                                +restoreSlice' :: forall v'1 v'2 v'3 dt m' . (MonadBuild m', TensorType dt) =>
                                +                 OpParams ->
                                +                 Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from
                                +                                                       -- which we read the tensor.
                                +                 -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be
                                +                                                          -- restored.
                                +                 -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slice__: Scalar. The shapes and slice specifications to use when
                                +                                                          -- restoring a tensors.
                                +                 -> m' (Tensor Value dt) -- ^ __tensor__: The restored tensor.
                                +restoreSlice' op'options file_pattern tensor_name
                                +              shape_and_slice | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs file_pattern,
                                +                                                             buildInputs tensor_name,
                                +                                                             buildInputs shape_and_slice]
                                +        buildOp [] (opDef "RestoreSlice"
                                +                    & opAttr "dt" .~ tensorType (undefined :: dt)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "file_pattern"
                                +  description: "Must have a single element. The pattern of the files from\nwhich we read the tensor."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor_name"
                                +  description: "Must have a single element. The name of the tensor to be\nrestored."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "shape_and_slice"
                                +  description: "Scalar. The shapes and slice specifications to use when\nrestoring a tensors."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "tensor" description: "The restored tensor." type_attr: "dt"
                                +}
                                +attr {
                                +  name: "dt"
                                +  type: "type"
                                +  description: "The type of the tensor to be restored."
                                +}
                                +attr {
                                +  name: "preferred_shard"
                                +  type: "int"
                                +  default_value { i: -1 }
                                +  description: "Index of file to open first if multiple files match\n`file_pattern`. See the documentation for `Restore`."
                                +}
                                +-}
                                +
                                +-- | Restores tensors from a V2 checkpoint.
                                +--
                                +-- For backward compatibility with the V1 format, this Op currently allows
                                +-- restoring from a V1 checkpoint as well:
                                +--   - This Op first attempts to find the V2 index file pointed to by "prefix", and
                                +--     if found proceed to read it as a V2 checkpoint;
                                +--   - Otherwise the V1 read path is invoked.
                                +-- Relying on this behavior is not recommended, as the ability to fall back to read
                                +-- V1 might be deprecated and eventually removed.
                                +-- 
                                +-- By default, restores the named tensors in full.  If the caller wishes to restore
                                +-- specific slices of stored tensors, "shape_and_slices" should be non-empty
                                +-- strings and correspondingly well-formed.
                                +-- 
                                +-- Callers must ensure all the named tensors are indeed stored in the checkpoint.
                                +restoreV2 :: forall v'1 v'2 v'3 dtypes m' . (MonadBuild m',
                                +                                             TensorTypes dtypes) => 
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__: Must have a single element.  The prefix of a V2 checkpoint.
                                +             -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: shape {N}.  The names of the tensors to be restored.
                                +             -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__: shape {N}.  The slice specs of the tensors to be restored.
                                +                                                      -- Empty strings indicate that they are non-partitioned tensors.
                                +             -> m' (TensorList (Value) dtypes) -- ^ __tensors__: shape {N}.  The restored tensors, whose shapes are read from the
                                +             -- checkpoint directly.
                                +restoreV2 = restoreV2' id
                                +restoreV2' :: forall v'1 v'2 v'3 dtypes m' . (MonadBuild m',
                                +                                              TensorTypes dtypes) => OpParams ->
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__: Must have a single element.  The prefix of a V2 checkpoint.
                                +              -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: shape {N}.  The names of the tensors to be restored.
                                +              -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__: shape {N}.  The slice specs of the tensors to be restored.
                                +                                                       -- Empty strings indicate that they are non-partitioned tensors.
                                +              -> m' (TensorList (Value) dtypes) -- ^ __tensors__: shape {N}.  The restored tensors, whose shapes are read from the
                                +              -- checkpoint directly.
                                +restoreV2' op'options prefix tensor_names shape_and_slices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs prefix,
                                +                                                             buildInputs tensor_names,
                                +                                                             buildInputs shape_and_slices]
                                +        buildOp [] (opDef "RestoreV2"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "prefix"
                                +  description: "Must have a single element.  The prefix of a V2 checkpoint."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor_names"
                                +  description: "shape {N}.  The names of the tensors to be restored."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "shape_and_slices"
                                +  description: "shape {N}.  The slice specs of the tensors to be restored.\nEmpty strings indicate that they are non-partitioned tensors."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "tensors"
                                +  description: "shape {N}.  The restored tensors, whose shapes are read from the\ncheckpoint directly."
                                +  type_list_attr: "dtypes"
                                +}
                                +attr {
                                +  name: "dtypes"
                                +  type: "list(type)"
                                +  description: "shape {N}.  The list of expected dtype for the tensors.  Must match\nthose stored in the checkpoint."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Reverses specific dimensions of a tensor.
                                +--
                                +-- Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
                                +-- of `tensor`, this operation reverses each dimension i of `tensor` where
                                +-- `dims[i]` is `True`.
                                +-- 
                                +-- `tensor` can have up to 8 dimensions. The number of dimensions
                                +-- of `tensor` must equal the number of elements in `dims`. In other words:
                                +-- 
                                +-- `rank(tensor) = size(dims)`
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 't' is [[[[ 0,  1,  2,  3],
                                +-- #                  [ 4,  5,  6,  7],
                                +-- #                  [ 8,  9, 10, 11]],
                                +-- #                 [[12, 13, 14, 15],
                                +-- #                  [16, 17, 18, 19],
                                +-- #                  [20, 21, 22, 23]]]]
                                +-- # tensor 't' shape is [1, 2, 3, 4]
                                +-- 
                                +-- # 'dims' is [False, False, False, True]
                                +-- reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                                +--                         [ 7,  6,  5,  4],
                                +--                         [ 11, 10, 9, 8]],
                                +--                        [[15, 14, 13, 12],
                                +--                         [19, 18, 17, 16],
                                +--                         [23, 22, 21, 20]]]]
                                +-- 
                                +-- # 'dims' is [False, True, False, False]
                                +-- reverse(t, dims) ==> [[[[12, 13, 14, 15],
                                +--                         [16, 17, 18, 19],
                                +--                         [20, 21, 22, 23]
                                +--                        [[ 0,  1,  2,  3],
                                +--                         [ 4,  5,  6,  7],
                                +--                         [ 8,  9, 10, 11]]]]
                                +-- 
                                +-- # 'dims' is [False, False, True, False]
                                +-- reverse(t, dims) ==> [[[[8, 9, 10, 11],
                                +--                         [4, 5, 6, 7],
                                +--                         [0, 1, 2, 3]]
                                +--                        [[20, 21, 22, 23],
                                +--                         [16, 17, 18, 19],
                                +--                         [12, 13, 14, 15]]]]
                                +-- ```
                                +reverse :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float), Bool,
                                +                                       Data.ByteString.ByteString,
                                +                                       Data.Int.Int32, Data.Int.Int64,
                                +                                       Data.Int.Int8, Data.Word.Word16,
                                +                                       Data.Word.Word8, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __tensor__: Up to 8-D.
                                +           -> Tensor v'2 Bool -- ^ __dims__: 1-D. The dimensions to reverse.
                                +           -> Tensor Build t -- ^ __output__: The same shape as `tensor`.
                                +reverse = reverse' id
                                +reverse' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float), Bool,
                                +                                        Data.ByteString.ByteString,
                                +                                        Data.Int.Int32, Data.Int.Int64,
                                +                                        Data.Int.Int8, Data.Word.Word16,
                                +                                        Data.Word.Word8, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __tensor__: Up to 8-D.
                                +            -> Tensor v'2 Bool -- ^ __dims__: 1-D. The dimensions to reverse.
                                +            -> Tensor Build t -- ^ __output__: The same shape as `tensor`.
                                +reverse' op'options tensor dims | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor,
                                +                                                             buildInputs dims]
                                +        return (opDef "Reverse"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tensor" description: "Up to 8-D." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "dims"
                                +  description: "1-D. The dimensions to reverse."
                                +  type: DT_BOOL
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same shape as `tensor`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_BOOL
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_STRING
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Reverses variable length slices.
                                +--
                                +-- This op first slices `input` along the dimension `batch_dim`, and for each
                                +-- slice `i`, reverses the first `seq_lengths[i]` elements along
                                +-- the dimension `seq_dim`.
                                +-- 
                                +-- The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
                                +-- and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
                                +-- 
                                +-- The output slice `i` along dimension `batch_dim` is then given by input
                                +-- slice `i`, with the first `seq_lengths[i]` slices along dimension
                                +-- `seq_dim` reversed.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # Given this:
                                +-- batch_dim = 0
                                +-- seq_dim = 1
                                +-- input.dims = (4, 8, ...)
                                +-- seq_lengths = [7, 2, 3, 5]
                                +-- 
                                +-- # then slices of input are reversed on seq_dim, but only up to seq_lengths:
                                +-- output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
                                +-- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
                                +-- output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
                                +-- output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
                                +-- 
                                +-- # while entries past seq_lens are copied through:
                                +-- output[0, 7:, :, ...] = input[0, 7:, :, ...]
                                +-- output[1, 2:, :, ...] = input[1, 2:, :, ...]
                                +-- output[2, 3:, :, ...] = input[2, 3:, :, ...]
                                +-- output[3, 2:, :, ...] = input[3, 2:, :, ...]
                                +-- ```
                                +-- 
                                +-- In contrast, if:
                                +-- 
                                +-- ```
                                +-- # Given this:
                                +-- batch_dim = 2
                                +-- seq_dim = 0
                                +-- input.dims = (8, ?, 4, ...)
                                +-- seq_lengths = [7, 2, 3, 5]
                                +-- 
                                +-- # then slices of input are reversed on seq_dim, but only up to seq_lengths:
                                +-- output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
                                +-- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
                                +-- output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
                                +-- output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
                                +-- 
                                +-- # while entries past seq_lens are copied through:
                                +-- output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
                                +-- output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
                                +-- output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
                                +-- output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
                                +-- ```
                                +reverseSequence :: forall v'1 v'2 t tlen . (TensorType t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tlen) => 
                                +                   Data.Int.Int64 -- ^ __seq_dim__: The dimension which is partially reversed.
                                +                   -> Tensor v'1 t -- ^ __input__: The input to reverse.
                                +                   -> Tensor v'2 tlen -- ^ __seq_lengths__: 1-D with length `input.dims(batch_dim)` and
                                +                                      -- `max(seq_lengths) <= input.dims(seq_dim)`
                                +                   -> Tensor Build t -- ^ __output__: The partially reversed input. It has the same shape as `input`.
                                +reverseSequence = reverseSequence' id
                                +reverseSequence' :: forall v'1 v'2 t tlen . (TensorType t,
                                +                                             OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] tlen) =>
                                +                    OpParams ->
                                +                    Data.Int.Int64 -- ^ __seq_dim__: The dimension which is partially reversed.
                                +                    -> Tensor v'1 t -- ^ __input__: The input to reverse.
                                +                    -> Tensor v'2 tlen -- ^ __seq_lengths__: 1-D with length `input.dims(batch_dim)` and
                                +                                       -- `max(seq_lengths) <= input.dims(seq_dim)`
                                +                    -> Tensor Build t -- ^ __output__: The partially reversed input. It has the same shape as `input`.
                                +reverseSequence' op'options seq_dim input seq_lengths | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs seq_lengths]
                                +        return (opDef "ReverseSequence"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tlen" .~ tensorType (undefined :: tlen)
                                +                & opAttr "seq_dim" .~ seq_dim
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The input to reverse." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "seq_lengths"
                                +  description: "1-D with length `input.dims(batch_dim)` and\n`max(seq_lengths) <= input.dims(seq_dim)`"
                                +  type_attr: "Tlen"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The partially reversed input. It has the same shape as `input`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "seq_dim"
                                +  type: "int"
                                +  description: "The dimension which is partially reversed."
                                +}
                                +attr {
                                +  name: "batch_dim"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "The dimension along which reversal is performed."
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tlen"
                                +  type: "type"
                                +  default_value { type: DT_INT64 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Reverses specific dimensions of a tensor.
                                +--
                                +-- NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
                                +-- `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
                                +-- 
                                +-- Given a `tensor`, and a `int32` tensor `axis` representing the set of
                                +-- dimensions of `tensor` to reverse. This operation reverses each dimension
                                +-- `i` for which there exists `j` s.t. `axis[j] == i`.
                                +-- 
                                +-- `tensor` can have up to 8 dimensions. The number of dimensions specified
                                +-- in `axis` may be 0 or more entries. If an index is specified more than
                                +-- once, a InvalidArgument error is raised.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 't' is [[[[ 0,  1,  2,  3],
                                +-- #                  [ 4,  5,  6,  7],
                                +-- #                  [ 8,  9, 10, 11]],
                                +-- #                 [[12, 13, 14, 15],
                                +-- #                  [16, 17, 18, 19],
                                +-- #                  [20, 21, 22, 23]]]]
                                +-- # tensor 't' shape is [1, 2, 3, 4]
                                +-- 
                                +-- # 'dims' is [3] or 'dims' is -1
                                +-- reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                                +--                         [ 7,  6,  5,  4],
                                +--                         [ 11, 10, 9, 8]],
                                +--                        [[15, 14, 13, 12],
                                +--                         [19, 18, 17, 16],
                                +--                         [23, 22, 21, 20]]]]
                                +-- 
                                +-- # 'dims' is '[1]' (or 'dims' is '[-3]')
                                +-- reverse(t, dims) ==> [[[[12, 13, 14, 15],
                                +--                         [16, 17, 18, 19],
                                +--                         [20, 21, 22, 23]
                                +--                        [[ 0,  1,  2,  3],
                                +--                         [ 4,  5,  6,  7],
                                +--                         [ 8,  9, 10, 11]]]]
                                +-- 
                                +-- # 'dims' is '[2]' (or 'dims' is '[-2]')
                                +-- reverse(t, dims) ==> [[[[8, 9, 10, 11],
                                +--                         [4, 5, 6, 7],
                                +--                         [0, 1, 2, 3]]
                                +--                        [[20, 21, 22, 23],
                                +--                         [16, 17, 18, 19],
                                +--                         [12, 13, 14, 15]]]]
                                +-- ```
                                +reverseV2 :: forall v'1 v'2 tidx t . (OneOf '[Data.Int.Int32,
                                +                                              Data.Int.Int64] tidx,
                                +                                      OneOf '[(Data.Complex.Complex Double),
                                +                                              (Data.Complex.Complex Float),
                                +                                              Bool, Data.ByteString.ByteString,
                                +                                              Data.Int.Int32, Data.Int.Int64,
                                +                                              Data.Int.Int8, Data.Word.Word16,
                                +                                              Data.Word.Word8, Double,
                                +                                              Float] t) => 
                                +             Tensor v'1 t -- ^ __tensor__: Up to 8-D.
                                +             -> Tensor v'2 tidx -- ^ __axis__: 1-D. The indices of the dimensions to reverse.
                                +             -> Tensor Build t -- ^ __output__: The same shape as `tensor`.
                                +reverseV2 = reverseV2' id
                                +reverseV2' :: forall v'1 v'2 tidx t . (OneOf '[Data.Int.Int32,
                                +                                               Data.Int.Int64] tidx,
                                +                                       OneOf '[(Data.Complex.Complex Double),
                                +                                               (Data.Complex.Complex Float),
                                +                                               Bool, Data.ByteString.ByteString,
                                +                                               Data.Int.Int32, Data.Int.Int64,
                                +                                               Data.Int.Int8, Data.Word.Word16,
                                +                                               Data.Word.Word8, Double,
                                +                                               Float] t) => OpParams ->
                                +              Tensor v'1 t -- ^ __tensor__: Up to 8-D.
                                +              -> Tensor v'2 tidx -- ^ __axis__: 1-D. The indices of the dimensions to reverse.
                                +              -> Tensor Build t -- ^ __output__: The same shape as `tensor`.
                                +reverseV2' op'options tensor axis | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor,
                                +                                                             buildInputs axis]
                                +        return (opDef "ReverseV2"
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tensor" description: "Up to 8-D." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "axis"
                                +  description: "1-D. The indices of the dimensions to reverse."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same shape as `tensor`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_BOOL
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_STRING
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns element-wise integer closest to x.
                                +--
                                +-- If the result is midway between two representable values,
                                +-- the even representable is chosen.
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- rint(-1.5) ==> -2.0
                                +-- rint(0.5000001) ==> 1.0
                                +-- rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
                                +-- ```
                                +rint :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +rint = rint' id
                                +rint' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +rint' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Rint"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Rounds the values of a tensor to the nearest integer, element-wise.
                                +--
                                +-- Rounds half to even.  Also known as bankers rounding. If you want to round
                                +-- according to the current system rounding mode use std::cint.
                                +round :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                 Data.Int.Int64, Data.Word.Word16, Double,
                                +                                 Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +round = round' id
                                +round' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                  Data.Int.Int64, Data.Word.Word16, Double,
                                +                                  Float] t) => OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +round' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Round"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes reciprocal of square root of x element-wise.
                                +--
                                +-- I.e., \\(y = 1 / \sqrt{x}\\).
                                +rsqrt :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => 
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +rsqrt = rsqrt' id
                                +rsqrt' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float),
                                +                                  Data.Word.Word16, Double, Float] t) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +rsqrt' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Rsqrt"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient for the rsqrt of `x` wrt its input.
                                +--
                                +-- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
                                +-- is the corresponding input gradient.
                                +rsqrtGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Word.Word16, Double, Float] t) => 
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 t -- ^ __y__
                                +             -> Tensor Build t -- ^ __z__
                                +rsqrtGrad = rsqrtGrad' id
                                +rsqrtGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                          (Data.Complex.Complex Float),
                                +                                          Data.Word.Word16, Double, Float] t) =>
                                +              OpParams ->
                                +              Tensor v'1 t -- ^ __x__
                                +              -> Tensor v'2 t -- ^ __y__
                                +              -> Tensor Build t -- ^ __z__
                                +rsqrtGrad' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "RsqrtGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Generate a single randomly distorted bounding box for an image.
                                +--
                                +-- Bounding box annotations are often supplied in addition to ground-truth labels
                                +-- in image recognition or object localization tasks. A common technique for
                                +-- training such a system is to randomly distort an image while preserving
                                +-- its content, i.e. *data augmentation*. This Op outputs a randomly distorted
                                +-- localization of an object, i.e. bounding box, given an `image_size`,
                                +-- `bounding_boxes` and a series of constraints.
                                +-- 
                                +-- The output of this Op is a single bounding box that may be used to crop the
                                +-- original image. The output is returned as 3 tensors: `begin`, `size` and
                                +-- `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
                                +-- image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
                                +-- what the bounding box looks like.
                                +-- 
                                +-- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
                                +-- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
                                +-- height of the underlying image.
                                +-- 
                                +-- For example,
                                +-- 
                                +-- ```python
                                +--     # Generate a single distorted bounding box.
                                +--     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
                                +--         tf.shape(image),
                                +--         bounding_boxes=bounding_boxes)
                                +-- 
                                +--     # Draw the bounding box in an image summary.
                                +--     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                +--                                                   bbox_for_draw)
                                +--     tf.image_summary('images_with_box', image_with_box)
                                +-- 
                                +--     # Employ the bounding box to distort the image.
                                +--     distorted_image = tf.slice(image, begin, size)
                                +-- ```
                                +-- 
                                +-- Note that if no bounding box information is available, setting
                                +-- `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
                                +-- bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
                                +-- false and no bounding boxes are supplied, an error is raised.
                                +sampleDistortedBoundingBox :: forall v'1 v'2 t m' . (MonadBuild m',
                                +                                                     OneOf '[Data.Int.Int16,
                                +                                                             Data.Int.Int32,
                                +                                                             Data.Int.Int64,
                                +                                                             Data.Int.Int8,
                                +                                                             Data.Word.Word8] t) =>
                                +                              
                                +                              Tensor v'1 t -- ^ __image_size__: 1-D, containing `[height, width, channels]`.
                                +                              -> Tensor v'2 Float -- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
                                +                                                  -- associated with the image.
                                +                              -> m' ((Tensor Value t, Tensor Value t,
                                +                                      Tensor Value Float))
                                +                              -- ^ (__begin__, __size__, __bboxes__)
                                +                              --
                                +                              -- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
                                +                              -- `tf.slice`.
                                +                              --
                                +                              -- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
                                +                              -- `tf.slice`.
                                +                              --
                                +                              -- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
                                +                              -- Provide as input to `tf.image.draw_bounding_boxes`.
                                +sampleDistortedBoundingBox = sampleDistortedBoundingBox' id
                                +sampleDistortedBoundingBox' :: forall v'1 v'2 t m' . (MonadBuild m',
                                +                                                      OneOf '[Data.Int.Int16,
                                +                                                              Data.Int.Int32,
                                +                                                              Data.Int.Int64,
                                +                                                              Data.Int.Int8,
                                +                                                              Data.Word.Word8] t) =>
                                +                               OpParams ->
                                +                               Tensor v'1 t -- ^ __image_size__: 1-D, containing `[height, width, channels]`.
                                +                               -> Tensor v'2 Float -- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
                                +                                                   -- associated with the image.
                                +                               -> m' ((Tensor Value t, Tensor Value t,
                                +                                       Tensor Value Float))
                                +                               -- ^ (__begin__, __size__, __bboxes__)
                                +                               --
                                +                               -- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
                                +                               -- `tf.slice`.
                                +                               --
                                +                               -- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
                                +                               -- `tf.slice`.
                                +                               --
                                +                               -- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
                                +                               -- Provide as input to `tf.image.draw_bounding_boxes`.
                                +sampleDistortedBoundingBox' op'options image_size
                                +                            bounding_boxes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image_size,
                                +                                                             buildInputs bounding_boxes]
                                +        buildOp [] (opDef "SampleDistortedBoundingBox"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "image_size"
                                +  description: "1-D, containing `[height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "bounding_boxes"
                                +  description: "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "begin"
                                +  description: "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "bboxes"
                                +  description: "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`.  Otherwise, it is seeded by a random\nseed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "min_object_covered"
                                +  type: "float"
                                +  default_value { f: 0.1 }
                                +  description: "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied."
                                +}
                                +attr {
                                +  name: "aspect_ratio_range"
                                +  type: "list(float)"
                                +  default_value { list { f: 0.75 f: 1.33 } }
                                +  description: "The cropped area of the image must have an aspect ratio =\nwidth / height within this range."
                                +}
                                +attr {
                                +  name: "area_range"
                                +  type: "list(float)"
                                +  default_value { list { f: 5.0e-2 f: 1.0 } }
                                 +  description: "The cropped area of the image must contain a fraction of the\nsupplied image within this range."
                                +}
                                +attr {
                                +  name: "max_attempts"
                                +  type: "int"
                                +  default_value { i: 100 }
                                +  description: "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage."
                                +}
                                +attr {
                                +  name: "use_image_if_no_bounding_boxes"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error."
                                +}
                                +-}
                                +
                                +-- | Generate a single randomly distorted bounding box for an image.
                                +--
                                +-- Bounding box annotations are often supplied in addition to ground-truth labels
                                +-- in image recognition or object localization tasks. A common technique for
                                +-- training such a system is to randomly distort an image while preserving
                                +-- its content, i.e. *data augmentation*. This Op outputs a randomly distorted
                                +-- localization of an object, i.e. bounding box, given an `image_size`,
                                +-- `bounding_boxes` and a series of constraints.
                                +-- 
                                +-- The output of this Op is a single bounding box that may be used to crop the
                                +-- original image. The output is returned as 3 tensors: `begin`, `size` and
                                +-- `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
                                +-- image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
                                +-- what the bounding box looks like.
                                +-- 
                                +-- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
                                +-- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
                                +-- height of the underlying image.
                                +-- 
                                +-- For example,
                                +-- 
                                +-- ```python
                                +--     # Generate a single distorted bounding box.
                                +--     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
                                +--         tf.shape(image),
                                +--         bounding_boxes=bounding_boxes)
                                +-- 
                                +--     # Draw the bounding box in an image summary.
                                +--     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                +--                                                   bbox_for_draw)
                                +--     tf.image_summary('images_with_box', image_with_box)
                                +-- 
                                +--     # Employ the bounding box to distort the image.
                                +--     distorted_image = tf.slice(image, begin, size)
                                +-- ```
                                +-- 
                                +-- Note that if no bounding box information is available, setting
                                +-- `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
                                +-- bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
                                +-- false and no bounding boxes are supplied, an error is raised.
                                +sampleDistortedBoundingBoxV2 :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                +                                                           OneOf '[Data.Int.Int16,
                                +                                                                   Data.Int.Int32,
                                +                                                                   Data.Int.Int64,
                                +                                                                   Data.Int.Int8,
                                +                                                                   Data.Word.Word8] t) =>
                                +                                
                                +                                Tensor v'1 t -- ^ __image_size__: 1-D, containing `[height, width, channels]`.
                                +                                -> Tensor v'2 Float -- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
                                +                                                    -- associated with the image.
                                +                                -> Tensor v'3 Float -- ^ __min_object_covered__: The cropped area of the image must contain at least this
                                +                                                    -- fraction of any bounding box supplied. The value of this parameter should be
                                +                                                    -- non-negative. In the case of 0, the cropped area does not need to overlap
                                +                                                    -- any of the bounding boxes supplied.
                                +                                -> m' ((Tensor Value t, Tensor Value t,
                                +                                        Tensor Value Float))
                                +                                -- ^ (__begin__, __size__, __bboxes__)
                                +                                --
                                +                                -- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
                                +                                -- `tf.slice`.
                                +                                --
                                +                                -- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
                                +                                -- `tf.slice`.
                                +                                --
                                +                                -- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
                                +                                -- Provide as input to `tf.image.draw_bounding_boxes`.
                                +sampleDistortedBoundingBoxV2 = sampleDistortedBoundingBoxV2' id
                                +sampleDistortedBoundingBoxV2' :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                +                                                            OneOf '[Data.Int.Int16,
                                +                                                                    Data.Int.Int32,
                                +                                                                    Data.Int.Int64,
                                +                                                                    Data.Int.Int8,
                                +                                                                    Data.Word.Word8] t) =>
                                +                                 OpParams ->
                                +                                 Tensor v'1 t -- ^ __image_size__: 1-D, containing `[height, width, channels]`.
                                +                                 -> Tensor v'2 Float -- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
                                +                                                     -- associated with the image.
                                +                                 -> Tensor v'3 Float -- ^ __min_object_covered__: The cropped area of the image must contain at least this
                                +                                                     -- fraction of any bounding box supplied. The value of this parameter should be
                                +                                                     -- non-negative. In the case of 0, the cropped area does not need to overlap
                                +                                                     -- any of the bounding boxes supplied.
                                +                                 -> m' ((Tensor Value t, Tensor Value t,
                                +                                         Tensor Value Float))
                                +                                 -- ^ (__begin__, __size__, __bboxes__)
                                +                                 --
                                +                                 -- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
                                +                                 -- `tf.slice`.
                                +                                 --
                                +                                 -- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
                                +                                 -- `tf.slice`.
                                +                                 --
                                +                                 -- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
                                +                                 -- Provide as input to `tf.image.draw_bounding_boxes`.
                                +sampleDistortedBoundingBoxV2' op'options image_size bounding_boxes
                                +                              min_object_covered | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image_size,
                                +                                                             buildInputs bounding_boxes,
                                +                                                             buildInputs min_object_covered]
                                +        buildOp [] (opDef "SampleDistortedBoundingBoxV2"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "image_size"
                                +  description: "1-D, containing `[height, width, channels]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "bounding_boxes"
                                +  description: "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "min_object_covered"
                                +  description: "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "begin"
                                +  description: "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "bboxes"
                                +  description: "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`.  Otherwise, it is seeded by a random\nseed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "aspect_ratio_range"
                                +  type: "list(float)"
                                +  default_value { list { f: 0.75 f: 1.33 } }
                                +  description: "The cropped area of the image must have an aspect ratio =\nwidth / height within this range."
                                +}
                                +attr {
                                +  name: "area_range"
                                +  type: "list(float)"
                                +  default_value { list { f: 5.0e-2 f: 1.0 } }
                                 +  description: "The cropped area of the image must contain a fraction of the\nsupplied image within this range."
                                +}
                                +attr {
                                +  name: "max_attempts"
                                +  type: "int"
                                +  default_value { i: 100 }
                                +  description: "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage."
                                +}
                                +attr {
                                +  name: "use_image_if_no_bounding_boxes"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error."
                                +}
                                +-}
                                +
                                +-- | Saves the input tensors to disk.
                                +--
                                +-- The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
                                +-- is written to `filename` with name `tensor_names[i]`.
                                +-- 
                                +-- See also `SaveSlices`.
                                +save :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorTypes t) => 
                                +        Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: Must have a single element. The name of the file to which we write
                                +                                              -- the tensor.
                                +        -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: Shape `[N]`. The names of the tensors to be saved.
                                +        -> TensorList (v'3) t -- ^ __data__: `N` tensors to save.
                                +        -> m' (ControlNode)
                                +save = save' id
                                +save' :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorTypes t) => OpParams ->
                                +         Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: Must have a single element. The name of the file to which we write
                                +                                               -- the tensor.
                                +         -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: Shape `[N]`. The names of the tensors to be saved.
                                +         -> TensorList (v'3) t -- ^ __data__: `N` tensors to save.
                                +         -> m' (ControlNode)
                                +save' op'options filename tensor_names data' | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename,
                                +                                                             buildInputs tensor_names,
                                +                                                             buildInputs data']
                                +        buildOp [] (opDef "Save"
                                +                    & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "filename"
                                +  description: "Must have a single element. The name of the file to which we write\nthe tensor."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor_names"
                                +  description: "Shape `[N]`. The names of the tensors to be saved."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "data"
                                +  description: "`N` tensors to save."
                                +  type_list_attr: "T"
                                +}
                                +attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | Saves input tensors slices to disk.
                                +--
                                +-- This is like `Save` except that tensors can be listed in the saved file as being
                                +-- a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
                                +-- larger tensor and the slice that this tensor covers. `shapes_and_slices` must
                                +-- have as many elements as `tensor_names`.
                                +-- 
                                +-- Elements of the `shapes_and_slices` input must either be:
                                +-- 
                                +-- *  The empty string, in which case the corresponding tensor is
                                +--    saved normally.
                                +-- *  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
                                +--    `dimI` are the dimensions of the larger tensor and `slice-spec`
                                +--    specifies what part is covered by the tensor to save.
                                +-- 
                                +-- `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
                                +-- where each `sliceI` is either:
                                +-- 
                                +-- *  The string `-` meaning that the slice covers all indices of this dimension
                                +-- *  `start,length` where `start` and `length` are integers.  In that
                                +--    case the slice covers `length` indices starting at `start`.
                                +-- 
                                +-- See also `Save`.
                                +saveSlices :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorTypes t) => 
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: Must have a single element. The name of the file to which we write the
                                +                                                    -- tensor.
                                +              -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: Shape `[N]`. The names of the tensors to be saved.
                                +              -> Tensor v'3 Data.ByteString.ByteString -- ^ __shapes_and_slices__: Shape `[N]`.  The shapes and slice specifications to use when
                                +                                                       -- saving the tensors.
                                +              -> TensorList (v'4) t -- ^ __data__: `N` tensors to save.
                                +              -> m' (ControlNode)
                                +saveSlices = saveSlices' id
                                +saveSlices' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorTypes t) =>
                                +               OpParams ->
                                +               Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: Must have a single element. The name of the file to which we write the
                                +                                                     -- tensor.
                                +               -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: Shape `[N]`. The names of the tensors to be saved.
                                +               -> Tensor v'3 Data.ByteString.ByteString -- ^ __shapes_and_slices__: Shape `[N]`.  The shapes and slice specifications to use when
                                +                                                        -- saving the tensors.
                                +               -> TensorList (v'4) t -- ^ __data__: `N` tensors to save.
                                +               -> m' (ControlNode)
                                +saveSlices' op'options filename tensor_names shapes_and_slices
                                +            data' | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename,
                                +                                                             buildInputs tensor_names,
                                +                                                             buildInputs shapes_and_slices,
                                +                                                             buildInputs data']
                                +        buildOp [] (opDef "SaveSlices"
                                +                    & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "filename"
                                +  description: "Must have a single element. The name of the file to which we write the\ntensor."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor_names"
                                +  description: "Shape `[N]`. The names of the tensors to be saved."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "shapes_and_slices"
                                +  description: "Shape `[N]`.  The shapes and slice specifications to use when\nsaving the tensors."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "data"
                                +  description: "`N` tensors to save."
                                +  type_list_attr: "T"
                                +}
                                +attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | Saves tensors in V2 checkpoint format.
                                +--
                                +-- By default, saves the named tensors in full.  If the caller wishes to save
                                +-- specific slices of full tensors, "shape_and_slices" should be non-empty strings
                                +-- and correspondingly well-formed.
                                +saveV2 :: forall v'1 v'2 v'3 v'4 dtypes m' . (MonadBuild m',
                                +                                              TensorTypes dtypes) => 
                                +          Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__: Must have a single element. The prefix of the V2 checkpoint to which we
                                +                                                -- write the tensors.
                                +          -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: shape {N}. The names of the tensors to be saved.
                                +          -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__: shape {N}.  The slice specs of the tensors to be saved.
                                +                                                   -- Empty strings indicate that they are non-partitioned tensors.
                                +          -> TensorList (v'4) dtypes -- ^ __tensors__: `N` tensors to save.
                                +          -> m' (ControlNode)
                                +saveV2 = saveV2' id
                                +saveV2' :: forall v'1 v'2 v'3 v'4 dtypes m' . (MonadBuild m',
                                +                                               TensorTypes dtypes) =>
                                +           OpParams ->
                                +           Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__: Must have a single element. The prefix of the V2 checkpoint to which we
                                +                                                 -- write the tensors.
                                +           -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: shape {N}. The names of the tensors to be saved.
                                +           -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__: shape {N}.  The slice specs of the tensors to be saved.
                                +                                                    -- Empty strings indicate that they are non-partitioned tensors.
                                +           -> TensorList (v'4) dtypes -- ^ __tensors__: `N` tensors to save.
                                +           -> m' (ControlNode)
                                +saveV2' op'options prefix tensor_names shape_and_slices
                                +        tensors | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs prefix,
                                +                                                             buildInputs tensor_names,
                                +                                                             buildInputs shape_and_slices,
                                +                                                             buildInputs tensors]
                                +        buildOp [] (opDef "SaveV2"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "prefix"
                                +  description: "Must have a single element. The prefix of the V2 checkpoint to which we\nwrite the tensors."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor_names"
                                +  description: "shape {N}. The names of the tensors to be saved."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "shape_and_slices"
                                +  description: "shape {N}.  The slice specs of the tensors to be saved.\nEmpty strings indicate that they are non-partitioned tensors."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensors"
                                +  description: "`N` tensors to save."
                                +  type_list_attr: "dtypes"
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +-}
                                +
                                +-- | Outputs a `Summary` protocol buffer with scalar values.
                                +--
                                +-- The input `tags` and `values` must have the same shape.  The generated summary
                                +-- has a summary value for each tag-value pair in `tags` and `values`.
                                +scalarSummary :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t) => 
                                +                 Tensor v'1 Data.ByteString.ByteString -- ^ __tags__: Tags for the summary.
                                +                 -> Tensor v'2 t -- ^ __values__: Same shape as `tags.  Values for the summary.
                                +                 -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar.  Serialized `Summary` protocol buffer.
                                +scalarSummary = scalarSummary' id
                                +scalarSummary' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                              Data.Int.Int64, Data.Int.Int8,
                                +                                              Data.Word.Word16, Data.Word.Word8,
                                +                                              Double, Float] t) => OpParams ->
                                +                  Tensor v'1 Data.ByteString.ByteString -- ^ __tags__: Tags for the summary.
                                +                  -> Tensor v'2 t -- ^ __values__: Same shape as `tags.  Values for the summary.
                                +                  -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar.  Serialized `Summary` protocol buffer.
                                +scalarSummary' op'options tags values | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tags,
                                +                                                             buildInputs values]
                                +        return (opDef "ScalarSummary"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tags" description: "Tags for the summary." type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "Same shape as `tags.  Values for the summary."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "summary"
                                +  description: "Scalar.  Serialized `Summary` protocol buffer."
                                +  type: DT_STRING
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Adds sparse updates to a variable reference.
                                +--
                                +-- This operation computes
                                +-- 
                                +--     # Scalar indices
                                +--     ref[indices, ...] += updates[...]
                                +-- 
                                +--     # Vector indices (for each i)
                                +--     ref[indices[i], ...] += updates[i, ...]
                                +-- 
                                +--     # High rank indices (for each i, ..., j)
                                +--     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
                                +-- 
                                +-- This operation outputs `ref` after the update is done.
                                +-- This makes it easier to chain operations that need to use the reset value.
                                +-- 
                                +-- Duplicate entries are handled correctly: if multiple `indices` reference
                                +-- the same location, their contributions add.
                                +-- 
                                +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
                                +-- </div>
                                +scatterAdd :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                      (Data.Complex.Complex Float),
                                +                                                      Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t,
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +              
                                +              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +              -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +              -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to add to `ref`.
                                +              -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +              -- to use the updated values after the update is done.
                                +scatterAdd = scatterAdd' id
                                +scatterAdd' :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +               OpParams ->
                                +               Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +               -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +               -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to add to `ref`.
                                +               -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +               -- to use the updated values after the update is done.
                                +scatterAdd' op'options ref indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ScatterAdd"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A tensor of indices into the first dimension of `ref`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A tensor of updated values to add to `ref`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Divides a variable reference by sparse updates.
                                +--
                                +-- This operation computes
                                +-- 
                                +-- ```python
                                +--     # Scalar indices
                                +--     ref[indices, ...] /= updates[...]
                                +-- 
                                +--     # Vector indices (for each i)
                                +--     ref[indices[i], ...] /= updates[i, ...]
                                +-- 
                                +--     # High rank indices (for each i, ..., j)
                                +--     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
                                +-- ```
                                +-- 
                                +-- This operation outputs `ref` after the update is done.
                                +-- This makes it easier to chain operations that need to use the reset value.
                                +-- 
                                +-- Duplicate entries are handled correctly: if multiple `indices` reference
                                +-- the same location, their contributions divide.
                                +-- 
                                +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
                                +scatterDiv :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                      (Data.Complex.Complex Float),
                                +                                                      Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t,
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +              
                                +              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +              -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +              -> Tensor v'3 t -- ^ __updates__: A tensor of values that `ref` is divided by.
                                +              -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +              -- to use the updated values after the update is done.
                                +scatterDiv = scatterDiv' id
                                +scatterDiv' :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +               OpParams ->
                                +               Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +               -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +               -> Tensor v'3 t -- ^ __updates__: A tensor of values that `ref` is divided by.
                                +               -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +               -- to use the updated values after the update is done.
                                +scatterDiv' op'options ref indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ScatterDiv"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A tensor of indices into the first dimension of `ref`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A tensor of values that `ref` is divided by."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Multiplies sparse updates into a variable reference.
                                +--
                                +-- This operation computes
                                +-- 
                                +-- ```python
                                +--     # Scalar indices
                                +--     ref[indices, ...] *= updates[...]
                                +-- 
                                +--     # Vector indices (for each i)
                                +--     ref[indices[i], ...] *= updates[i, ...]
                                +-- 
                                +--     # High rank indices (for each i, ..., j)
                                +--     ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
                                +-- ```
                                +-- 
                                +-- This operation outputs `ref` after the update is done.
                                +-- This makes it easier to chain operations that need to use the reset value.
                                +-- 
                                +-- Duplicate entries are handled correctly: if multiple `indices` reference
                                +-- the same location, their contributions multiply.
                                +-- 
                                +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
                                +scatterMul :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                      (Data.Complex.Complex Float),
                                +                                                      Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t,
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +              
                                +              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +              -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +              -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to multiply to `ref`.
                                +              -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +              -- to use the updated values after the update is done.
                                +scatterMul = scatterMul' id
                                +scatterMul' :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +               OpParams ->
                                +               Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +               -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +               -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to multiply to `ref`.
                                +               -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +               -- to use the updated values after the update is done.
                                +scatterMul' op'options ref indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ScatterMul"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A tensor of indices into the first dimension of `ref`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A tensor of updated values to multiply to `ref`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Scatter `updates` into a new (initially zero) tensor according to `indices`.
                                +--
                                +-- Creates a new tensor by applying sparse `updates` to individual
                                +-- values or slices within a zero tensor of the given `shape` according to
                                +-- indices.  This operator is the inverse of the @{tf.gather_nd} operator which
                                +-- extracts values or slices from a given tensor.
                                +-- 
                                +-- **WARNING**: The order in which updates are applied is nondeterministic, so the
                                +-- output will be nondeterministic if `indices` contains duplicates.
                                +-- 
                                +-- `indices` is an integer tensor containing indices into a new tensor of shape
                                +-- `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
                                +-- 
                                +--     indices.shape[-1] <= shape.rank
                                +-- 
                                +-- The last dimension of `indices` corresponds to indices into elements
                                +-- (if `indices.shape[-1] = shape.rank`) or slices
                                +-- (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
                                +-- `shape`.  `updates` is a tensor with shape
                                +-- 
                                +--     indices.shape[:-1] + shape[indices.shape[-1]:]
                                +-- 
                                +-- The simplest form of scatter is to insert individual elements in a tensor by
                                +-- index. For example, say we want to insert 4 scattered elements in a rank-1
                                +-- tensor with 8 elements.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
                                +-- </div>
                                +-- 
                                +-- In Python, this scatter operation would look like this:
                                +-- 
                                +-- ```python
                                +--     indices = tf.constant([[4], [3], [1], [7]])
                                +--     updates = tf.constant([9, 10, 11, 12])
                                +--     shape = tf.constant([8])
                                +--     scatter = tf.scatter_nd(indices, updates, shape)
                                +--     with tf.Session() as sess:
                                +--       print(sess.run(scatter))
                                +-- ```
                                +-- 
                                +-- The resulting tensor would look like this:
                                +-- 
                                +--     [0, 11, 0, 10, 9, 0, 0, 12]
                                +-- 
                                +-- We can also, insert entire slices of a higher rank tensor all at once. For
                                +-- example, if we wanted to insert two slices in the first dimension of a
                                +-- rank-3 tensor with two matrices of new values.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
                                +-- </div>
                                +-- 
                                +-- In Python, this scatter operation would look like this:
                                +-- 
                                +-- ```python
                                +--     indices = tf.constant([[0], [2]])
                                +--     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                                +--                             [7, 7, 7, 7], [8, 8, 8, 8]],
                                +--                            [[5, 5, 5, 5], [6, 6, 6, 6],
                                +--                             [7, 7, 7, 7], [8, 8, 8, 8]]])
                                +--     shape = tf.constant([4, 4, 4])
                                +--     scatter = tf.scatter_nd(indices, updates, shape)
                                +--     with tf.Session() as sess:
                                +--       print(sess.run(scatter))
                                +-- ```
                                +-- 
                                +-- The resulting tensor would look like this:
                                +-- 
                                +--     [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                                +--      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
                                +--      [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                                +--      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
                                +scatterNd :: forall v'1 v'2 v'3 t tindices . (TensorType t,
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +             
                                +             Tensor v'1 tindices -- ^ __indices__: Index tensor.
                                +             -> Tensor v'2 t -- ^ __updates__: Updates to scatter into output.
                                +             -> Tensor v'3 tindices -- ^ __shape__: 1-D. The shape of the resulting tensor.
                                +             -> Tensor Build t -- ^ __output__: A new tensor with the given shape and updates applied according
                                +             -- to the indices.
                                +scatterNd = scatterNd' id
                                +scatterNd' :: forall v'1 v'2 v'3 t tindices . (TensorType t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +              OpParams ->
                                +              Tensor v'1 tindices -- ^ __indices__: Index tensor.
                                +              -> Tensor v'2 t -- ^ __updates__: Updates to scatter into output.
                                +              -> Tensor v'3 tindices -- ^ __shape__: 1-D. The shape of the resulting tensor.
                                +              -> Tensor Build t -- ^ __output__: A new tensor with the given shape and updates applied according
                                +              -- to the indices.
                                +scatterNd' op'options indices updates shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices,
                                +                                                             buildInputs updates,
                                +                                                             buildInputs shape]
                                +        return (opDef "ScatterNd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "indices" description: "Index tensor." type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "Updates to scatter into output."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "shape"
                                +  description: "1-D. The shape of the resulting tensor."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A new tensor with the given shape and updates applied according\nto the indices."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Applies sparse addition between `updates` and individual values or slices
                                +--
                                +-- within a given variable according to `indices`.
                                +-- 
                                +-- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
                                +-- 
                                +-- `indices` must be integer tensor, containing indices into `ref`.
                                +-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
                                +-- 
                                +-- The innermost dimension of `indices` (with length `K`) corresponds to
                                +-- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
                                +-- dimension of `ref`.
                                +-- 
                                +-- `updates` is `Tensor` of rank `Q-1+P-K` with shape:
                                +-- 
                                +-- ```
                                +-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
                                +-- ```
                                +-- 
                                +-- For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
                                +-- elements. In Python, that addition would look like this:
                                +-- 
                                +--     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
                                +--     indices = tf.constant([[4], [3], [1], [7]])
                                +--     updates = tf.constant([9, 10, 11, 12])
                                +--     add = tf.scatter_nd_add(ref, indices, updates)
                                +--     with tf.Session() as sess:
                                +--       print sess.run(add)
                                +-- 
                                +-- The resulting update to ref would look like this:
                                +-- 
                                +--     [1, 13, 3, 14, 14, 6, 7, 20]
                                +-- 
                                +-- See @{tf.scatter_nd} for more details about how to make updates to
                                +-- slices.
                                +scatterNdAdd :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                                OneOf '[(Data.Complex.Complex Double),
                                +                                                        (Data.Complex.Complex Float),
                                +                                                        Data.Int.Int16,
                                +                                                        Data.Int.Int32,
                                +                                                        Data.Int.Int64,
                                +                                                        Data.Int.Int8,
                                +                                                        Data.Word.Word16,
                                +                                                        Data.Word.Word8, Double,
                                +                                                        Float] t,
                                +                                                OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] tindices) =>
                                +                
                                +                Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
                                +                -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
                                +                                       -- A tensor of indices into ref.
                                +                -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values
                                +                                -- to add to ref.
                                +                -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want
                                +                -- to use the updated values after the update is done.
                                +scatterNdAdd = scatterNdAdd' id
                                +scatterNdAdd' :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                                 OneOf '[(Data.Complex.Complex Double),
                                +                                                         (Data.Complex.Complex Float),
                                +                                                         Data.Int.Int16,
                                +                                                         Data.Int.Int32,
                                +                                                         Data.Int.Int64,
                                +                                                         Data.Int.Int8,
                                +                                                         Data.Word.Word16,
                                +                                                         Data.Word.Word8,
                                +                                                         Double, Float] t,
                                +                                                 OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int64] tindices) =>
                                +                 OpParams ->
                                +                 Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
                                +                 -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
                                +                                        -- A tensor of indices into ref.
                                +                 -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values
                                +                                 -- to add to ref.
                                +                 -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want
                                +                 -- to use the updated values after the update is done.
                                +scatterNdAdd' op'options ref indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ScatterNdAdd"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "A mutable Tensor. Should be from a Variable node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Applies sparse addition to `input` using individual values or slices
                                +--
                                +-- from `updates` according to indices `indices`.  The updates are non-aliasing:
                                +-- `input` is only modified in-place if no other operations will use it.
                                +-- Otherwise, a copy of `input` is made.  This operation has a gradient with
                                +-- respect to both `input` and `updates`.
                                +-- 
                                +-- `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
                                +-- 
                                +-- `indices` must be integer tensor, containing indices into `input`.
                                +-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
                                +-- 
                                +-- The innermost dimension of `indices` (with length `K`) corresponds to
                                +-- indices into elements (if `K = P`) or `(P-K)`-dimensional slices
                                +-- (if `K < P`) along the `K`th dimension of `input`.
                                +-- 
                                +-- `updates` is `Tensor` of rank `Q-1+P-K` with shape:
                                +-- 
                                +-- ```
                                +-- [d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
                                +-- ```
                                +-- 
                                +-- For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
                                +-- elements. In Python, that addition would look like this:
                                +-- 
                                +--     input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
                                +--     indices = tf.constant([[4], [3], [1], [7]])
                                +--     updates = tf.constant([9, 10, 11, 12])
                                +--     output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
                                +--     with tf.Session() as sess:
                                +--       print(sess.run(output))
                                +-- 
                                +-- The resulting value `output` would look like this:
                                +-- 
                                +--     [1, 13, 3, 14, 14, 6, 7, 20]
                                +-- 
                                +-- See @{tf.scatter_nd} for more details about how to make updates to slices.
                                +scatterNdNonAliasingAdd :: forall v'1 v'2 v'3 t
                                +                           tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                               (Data.Complex.Complex Float),
                                +                                               Data.Int.Int16, Data.Int.Int32,
                                +                                               Data.Int.Int64, Data.Int.Int8,
                                +                                               Data.Word.Word16,
                                +                                               Data.Word.Word8, Double,
                                +                                               Float] t, OneOf '[Data.Int.Int32,
                                +                                                                 Data.Int.Int64] tindices) =>
                                +                           
                                +                           Tensor v'1 t -- ^ __input__: A Tensor.
                                +                           -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: `int32`, `int64`.
                                +                                                  -- A tensor of indices into `input`.
                                +                           -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values
                                +                                           -- to add to `input`.
                                +                           -> Tensor Build t -- ^ __output__: A `Tensor` with the same shape as `input`, containing values of `input`
                                +                           -- updated with `updates`.
                                +scatterNdNonAliasingAdd = scatterNdNonAliasingAdd' id
                                +scatterNdNonAliasingAdd' :: forall v'1 v'2 v'3 t
                                +                            tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                                (Data.Complex.Complex Float),
                                +                                                Data.Int.Int16, Data.Int.Int32,
                                +                                                Data.Int.Int64, Data.Int.Int8,
                                +                                                Data.Word.Word16,
                                +                                                Data.Word.Word8, Double,
                                +                                                Float] t,
                                +                                        OneOf '[Data.Int.Int32,
                                +                                                Data.Int.Int64] tindices) =>
                                +                            OpParams ->
                                +                            Tensor v'1 t -- ^ __input__: A Tensor.
                                +                            -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: `int32`, `int64`.
                                +                                                   -- A tensor of indices into `input`.
                                +                            -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values
                                +                                            -- to add to `input`.
                                +                            -> Tensor Build t -- ^ __output__: A `Tensor` with the same shape as `input`, containing values of `input`
                                +                            -- updated with `updates`.
                                +scatterNdNonAliasingAdd' op'options input indices updates | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        return (opDef "ScatterNdNonAliasingAdd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" description: "A Tensor." type_attr: "T" }
                                +input_arg {
                                +  name: "indices"
                                +  description: "A Tensor. Must be one of the following types: `int32`, `int64`.\nA tensor of indices into `input`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to `input`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A `Tensor` with the same shape as `input`, containing values of `input`\nupdated with `updates`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Applies sparse subtraction between `updates` and individual values or slices
                                +--
                                +-- within a given variable according to `indices`.
                                +-- 
                                +-- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
                                +-- 
                                +-- `indices` must be integer tensor, containing indices into `ref`.
                                +-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
                                +-- 
                                +-- The innermost dimension of `indices` (with length `K`) corresponds to
                                +-- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
                                +-- dimension of `ref`.
                                +-- 
                                +-- `updates` is `Tensor` of rank `Q-1+P-K` with shape:
                                +-- 
                                +-- ```
                                +-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
                                +-- ```
                                +-- 
                                +-- For example, say we want to subtract 4 scattered elements from a rank-1 tensor
                                +-- with 8 elements. In Python, that subtraction would look like this:
                                +-- 
                                +--     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
                                +--     indices = tf.constant([[4], [3], [1], [7]])
                                +--     updates = tf.constant([9, 10, 11, 12])
                                +--     sub = tf.scatter_nd_sub(ref, indices, updates)
                                +--     with tf.Session() as sess:
                                +--       print sess.run(sub)
                                +-- 
                                +-- The resulting update to ref would look like this:
                                +-- 
                                +--     [1, -9, 3, -6, -4, 6, 7, -4]
                                +-- 
                                +-- See @{tf.scatter_nd} for more details about how to make updates to
                                +-- slices.
                                +scatterNdSub :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                                OneOf '[(Data.Complex.Complex Double),
                                +                                                        (Data.Complex.Complex Float),
                                +                                                        Data.Int.Int16,
                                +                                                        Data.Int.Int32,
                                +                                                        Data.Int.Int64,
                                +                                                        Data.Int.Int8,
                                +                                                        Data.Word.Word16,
                                +                                                        Data.Word.Word8, Double,
                                +                                                        Float] t,
                                +                                                OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] tindices) =>
                                +                
                                +                Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
                                +                -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
                                +                                       -- A tensor of indices into ref.
                                +                -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values
                                +                                -- to subtract from ref.
                                +                -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want
                                +                -- to use the updated values after the update is done.
                                +scatterNdSub = scatterNdSub' id
                                +scatterNdSub' :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                                 OneOf '[(Data.Complex.Complex Double),
                                +                                                         (Data.Complex.Complex Float),
                                +                                                         Data.Int.Int16,
                                +                                                         Data.Int.Int32,
                                +                                                         Data.Int.Int64,
                                +                                                         Data.Int.Int8,
                                +                                                         Data.Word.Word16,
                                +                                                         Data.Word.Word8,
                                +                                                         Double, Float] t,
                                +                                                 OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int64] tindices) =>
                                +                 OpParams ->
                                +                 Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
                                +                 -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
                                +                                        -- A tensor of indices into ref.
                                +                 -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values
                                +                                 -- to subtract from ref.
                                +                 -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want
                                +                 -- to use the updated values after the update is done.
                                +scatterNdSub' op'options ref indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ScatterNdSub"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "A mutable Tensor. Should be from a Variable node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto subtract from ref."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Applies sparse `updates` to individual values or slices within a given
                                +--
                                +-- variable according to `indices`.
                                +-- 
                                +-- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
                                +-- 
                                +-- `indices` must be integer tensor, containing indices into `ref`.
                                +-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
                                +-- 
                                +-- The innermost dimension of `indices` (with length `K`) corresponds to
                                +-- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
                                +-- dimension of `ref`.
                                +-- 
                                +-- `updates` is `Tensor` of rank `Q-1+P-K` with shape:
                                +-- 
                                +-- ```
                                +-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
                                +-- ```
                                +-- 
                                +-- For example, say we want to update 4 scattered elements to a rank-1 tensor to
                                +-- 8 elements. In Python, that update would look like this:
                                +-- 
                                +-- ```python
                                +--     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
                                +--     indices = tf.constant([[4], [3], [1] ,[7]])
                                +--     updates = tf.constant([9, 10, 11, 12])
                                +--     update = tf.scatter_nd_update(ref, indices, updates)
                                +--     with tf.Session() as sess:
                                +--       print sess.run(update)
                                +-- ```
                                +-- 
                                +-- The resulting update to ref would look like this:
                                +-- 
                                +--     [1, 11, 3, 10, 9, 6, 7, 12]
                                +-- 
                                +-- See @{tf.scatter_nd} for more details about how to make updates to
                                +-- slices.
                                +scatterNdUpdate :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t,
                                +                                                   OneOf '[Data.Int.Int32,
                                +                                                           Data.Int.Int64] tindices) =>
                                +                   
                                +                   Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
                                +                   -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
                                +                                          -- A tensor of indices into ref.
                                +                   -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated
                                +                                   -- values to add to ref.
                                +                   -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want to
                                +                   -- use the updated values after the update is done.
                                +scatterNdUpdate = scatterNdUpdate' id
                                +scatterNdUpdate' :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t,
                                +                                                    OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] tindices) =>
                                +                    OpParams ->
                                +                    Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
                                +                    -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
                                +                                           -- A tensor of indices into ref.
                                +                    -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated
                                +                                    -- values to add to ref.
                                +                    -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want to
                                +                    -- use the updated values after the update is done.
                                +scatterNdUpdate' op'options ref indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ScatterNdUpdate"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "A mutable Tensor. Should be from a Variable node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A Tensor. Must have the same type as ref. A tensor of updated\nvalues to add to ref."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "Same as ref. Returned as a convenience for operations that want to\nuse the updated values after the update is done."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Subtracts sparse updates to a variable reference.
                                +--
                                +-- ```python
                                +--     # Scalar indices
                                +--     ref[indices, ...] -= updates[...]
                                +-- 
                                +--     # Vector indices (for each i)
                                +--     ref[indices[i], ...] -= updates[i, ...]
                                +-- 
                                +--     # High rank indices (for each i, ..., j)
                                +--     ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
                                +-- ```
                                +-- 
                                +-- This operation outputs `ref` after the update is done.
                                +-- This makes it easier to chain operations that need to use the reset value.
                                +-- 
                                +-- Duplicate entries are handled correctly: if multiple `indices` reference
                                +-- the same location, their (negated) contributions add.
                                +-- 
                                +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt>
                                +-- </div>
                                +scatterSub :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                              OneOf '[(Data.Complex.Complex Double),
                                +                                                      (Data.Complex.Complex Float),
                                +                                                      Data.Int.Int16,
                                +                                                      Data.Int.Int32,
                                +                                                      Data.Int.Int64,
                                +                                                      Data.Int.Int8,
                                +                                                      Data.Word.Word16,
                                +                                                      Data.Word.Word8, Double,
                                +                                                      Float] t,
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +              
                                +              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +              -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +              -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to subtract from `ref`.
                                +              -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +              -- to use the updated values after the update is done.
                                +scatterSub = scatterSub' id
                                +scatterSub' :: forall v'2 v'3 t tindices m' . (MonadBuild m',
                                +                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                       (Data.Complex.Complex Float),
                                +                                                       Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +               OpParams ->
                                +               Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +               -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +               -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to subtract from `ref`.
                                +               -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +               -- to use the updated values after the update is done.
                                +scatterSub' op'options ref indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ScatterSub"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A tensor of indices into the first dimension of `ref`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A tensor of updated values to subtract from `ref`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Applies sparse updates to a variable reference.
                                +--
                                +-- This operation computes
                                +-- 
                                +-- ```python
                                +--     # Scalar indices
                                +--     ref[indices, ...] = updates[...]
                                +-- 
                                +--     # Vector indices (for each i)
                                +--     ref[indices[i], ...] = updates[i, ...]
                                +-- 
                                +--     # High rank indices (for each i, ..., j)
                                +--     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
                                +-- ```
                                +-- 
                                +-- This operation outputs `ref` after the update is done.
                                +-- This makes it easier to chain operations that need to use the reset value.
                                +-- 
                                +-- If values in `ref` is to be updated more than once, because there are
                                +-- duplicate entries in `indices`, the order at which the updates happen
                                +-- for each value is undefined.
                                +-- 
                                +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
                                +-- </div>
                                +scatterUpdate :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t,
                                +                                                 OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int64] tindices) =>
                                +                 
                                +                 Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +                 -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +                 -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to store in `ref`.
                                +                 -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +                 -- to use the updated values after the update is done.
                                +scatterUpdate = scatterUpdate' id
                                +scatterUpdate' :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t,
                                +                                                  OneOf '[Data.Int.Int32,
                                +                                                          Data.Int.Int64] tindices) =>
                                +                  OpParams ->
                                +                  Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
                                +                  -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
                                +                  -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to store in `ref`.
                                +                  -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
                                +                  -- to use the updated values after the update is done.
                                +scatterUpdate' op'options ref indices updates | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs updates]
                                +        buildOp [] (opDef "ScatterUpdate"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "ref"
                                +  description: "Should be from a `Variable` node."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A tensor of indices into the first dimension of `ref`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "updates"
                                +  description: "A tensor of updated values to store in `ref`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_ref"
                                +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Computes fingerprints of the input strings.
                                +
                                +sdcaFprint :: 
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __input__: vector of strings to compute fingerprints on.
                                +              -> Tensor Build Data.Int.Int64 -- ^ __output__: a (N,2) shaped matrix where N is the number of elements in the input
                                +              -- vector. Each row contains the low and high parts of the fingerprint.
                                +sdcaFprint = sdcaFprint' id
                                +sdcaFprint' :: OpParams ->
                                +               Tensor v'1 Data.ByteString.ByteString -- ^ __input__: vector of strings to compute fingerprints on.
                                +               -> Tensor Build Data.Int.Int64 -- ^ __output__: a (N,2) shaped matrix where N is the number of elements in the input
                                +               -- vector. Each row contains the low and high parts of the fingerprint.
                                +sdcaFprint' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "SdcaFprint"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "vector of strings to compute fingerprints on."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "a (N,2) shaped matrix where N is the number of elements in the input\nvector. Each row contains the low and high parts of the fingerprint."
                                +  type: DT_INT64
                                +}
                                +-}
                                +
                                +-- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
                                +--
                                +-- linear models with L1 + L2 regularization. As global optimization objective is
                                +-- strongly-convex, the optimizer optimizes the dual objective at each step. The
                                +-- optimizer applies each update one example at a time. Examples are sampled
                                +-- uniformly, and the optimizer is learning rate free and enjoys linear convergence
                                +-- rate.
                                +-- 
                                +-- [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
                                +-- Shai Shalev-Shwartz, Tong Zhang. 2012
                                +-- 
                                +-- $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
                                +-- 
                                +-- [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
                                +-- Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
                                +-- Peter Richtarik, Martin Takac. 2015
                                +-- 
                                +-- [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
                                +-- Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
                                +sdcaOptimizer :: 
                                +                 Float -- ^ __l1__: Symmetric l1 regularization strength.
                                +                 -> Float -- ^ __l2__: Symmetric l2 regularization strength.
                                +                 -> Data.Int.Int64 -- ^ __num_inner_iterations__: Number of iterations per mini-batch.
                                +                 -> Data.Int.Int64 -- ^ __num_loss_partitions__: Number of partitions of the global loss function.
                                +                 -> [Tensor v'1 Data.Int.Int64] -- ^ __sparse_example_indices__: a list of vectors which contain example indices.
                                +                 -> [Tensor v'2 Data.Int.Int64] -- ^ __sparse_feature_indices__: a list of vectors which contain feature indices.
                                +                 -> [Tensor v'3 Float] -- ^ __sparse_feature_values__: a list of vectors which contains feature value
                                +                                       -- associated with each feature group.
                                +                 -> [Tensor v'4 Float] -- ^ __dense_features__: a list of matrices which contains the dense feature values.
                                +                 -> Tensor v'5 Float -- ^ __example_weights__: a vector which contains the weight associated with each
                                +                                     -- example.
                                +                 -> Tensor v'6 Float -- ^ __example_labels__: a vector which contains the label/target associated with each
                                +                                     -- example.
                                +                 -> [Tensor v'7 Data.Int.Int64] -- ^ __sparse_indices__: a list of vectors where each value is the indices which has
                                +                                                -- corresponding weights in sparse_weights. This field maybe omitted for the
                                +                                                -- dense approach.
                                +                 -> [Tensor v'8 Float] -- ^ __sparse_weights__: a list of vectors where each value is the weight associated with
                                +                                       -- a sparse feature group.
                                +                 -> [Tensor v'9 Float] -- ^ __dense_weights__: a list of vectors where the values are the weights associated
                                +                                       -- with a dense feature group.
                                +                 -> Tensor v'10 Float -- ^ __example_state_data__: a list of vectors containing the example state data.
                                +                 -> (Tensor Build Float, [Tensor Build Float],
                                +                     [Tensor Build Float])
                                +                 -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__)
                                +                 --
                                +                 -- * __out_example_state_data__: a list of vectors containing the updated example state
                                +                 -- data.
                                +                 --
                                +                 -- * __out_delta_sparse_weights__: a list of vectors where each value is the delta
                                +                 -- weights associated with a sparse feature group.
                                +                 --
                                +                 -- * __out_delta_dense_weights__: a list of vectors where the values are the delta
                                +                 -- weights associated with a dense feature group.
                                +sdcaOptimizer = sdcaOptimizer' id
                                +sdcaOptimizer' :: OpParams ->
                                +                  Float -- ^ __l1__: Symmetric l1 regularization strength.
                                +                  -> Float -- ^ __l2__: Symmetric l2 regularization strength.
                                +                  -> Data.Int.Int64 -- ^ __num_inner_iterations__: Number of iterations per mini-batch.
                                +                  -> Data.Int.Int64 -- ^ __num_loss_partitions__: Number of partitions of the global loss function.
                                +                  -> [Tensor v'1 Data.Int.Int64] -- ^ __sparse_example_indices__: a list of vectors which contain example indices.
                                +                  -> [Tensor v'2 Data.Int.Int64] -- ^ __sparse_feature_indices__: a list of vectors which contain feature indices.
                                +                  -> [Tensor v'3 Float] -- ^ __sparse_feature_values__: a list of vectors which contains feature value
                                +                                        -- associated with each feature group.
                                +                  -> [Tensor v'4 Float] -- ^ __dense_features__: a list of matrices which contains the dense feature values.
                                +                  -> Tensor v'5 Float -- ^ __example_weights__: a vector which contains the weight associated with each
                                +                                      -- example.
                                +                  -> Tensor v'6 Float -- ^ __example_labels__: a vector which contains the label/target associated with each
                                +                                      -- example.
                                +                  -> [Tensor v'7 Data.Int.Int64] -- ^ __sparse_indices__: a list of vectors where each value is the indices which has
                                +                                                 -- corresponding weights in sparse_weights. This field maybe omitted for the
                                +                                                 -- dense approach.
                                +                  -> [Tensor v'8 Float] -- ^ __sparse_weights__: a list of vectors where each value is the weight associated with
                                +                                        -- a sparse feature group.
                                +                  -> [Tensor v'9 Float] -- ^ __dense_weights__: a list of vectors where the values are the weights associated
                                +                                        -- with a dense feature group.
                                +                  -> Tensor v'10 Float -- ^ __example_state_data__: a list of vectors containing the example state data.
                                +                  -> (Tensor Build Float, [Tensor Build Float],
                                +                      [Tensor Build Float])
                                +                  -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__)
                                +                  --
                                +                  -- * __out_example_state_data__: a list of vectors containing the updated example state
                                +                  -- data.
                                +                  --
                                +                  -- * __out_delta_sparse_weights__: a list of vectors where each value is the delta
                                +                  -- weights associated with a sparse feature group.
                                +                  --
                                +                  -- * __out_delta_dense_weights__: a list of vectors where the values are the delta
                                +                  -- weights associated with a dense feature group.
                                +sdcaOptimizer' op'options l1 l2 num_inner_iterations num_loss_partitions
                                +               sparse_example_indices sparse_feature_indices
                                +               sparse_feature_values dense_features example_weights
                                +               example_labels sparse_indices sparse_weights dense_weights
                                +               example_state_data | eqLengthGuard [("num_sparse_features", [("sparse_example_indices", length sparse_example_indices),
                                +                                                                            ("sparse_feature_indices", length sparse_feature_indices),
                                +                                                                            ("sparse_indices", length sparse_indices),
                                +                                                                            ("sparse_weights", length sparse_weights)]),
                                +                                                   ("num_sparse_features_with_values", [("sparse_feature_values", length sparse_feature_values)]),
                                +                                                   ("num_dense_features", [("dense_features", length dense_features),
                                +                                                                           ("dense_weights", length dense_weights)])] =
                                +    pureOp [num_sparse_features, num_dense_features] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_example_indices,
                                +                                                             buildInputs sparse_feature_indices,
                                +                                                             buildInputs sparse_feature_values,
                                +                                                             buildInputs dense_features,
                                +                                                             buildInputs example_weights,
                                +                                                             buildInputs example_labels,
                                +                                                             buildInputs sparse_indices,
                                +                                                             buildInputs sparse_weights,
                                +                                                             buildInputs dense_weights,
                                +                                                             buildInputs example_state_data]
                                +        return (opDef "SdcaOptimizer"
                                +                & opAttr "l1" .~ l1
                                +                & opAttr "l2" .~ l2
                                +                & opAttr "num_inner_iterations" .~ num_inner_iterations
                                +                & opAttr "num_loss_partitions" .~ num_loss_partitions
                                +                & opAttr "num_sparse_features" .~ num_sparse_features
                                +                & opAttr "num_sparse_features_with_values" .~ num_sparse_features_with_values
                                +                & opAttr "num_dense_features" .~ num_dense_features
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    num_sparse_features = fromIntegral (length sparse_example_indices) :: Int64
                                +    num_sparse_features_with_values = fromIntegral (length sparse_feature_values) :: Int64
                                +    num_dense_features = fromIntegral (length dense_features) :: Int64
                                +{-
                                +input_arg {
                                +  name: "sparse_example_indices"
                                +  description: "a list of vectors which contain example indices."
                                +  type: DT_INT64
                                +  number_attr: "num_sparse_features"
                                +}
                                +input_arg {
                                +  name: "sparse_feature_indices"
                                +  description: "a list of vectors which contain feature indices."
                                +  type: DT_INT64
                                +  number_attr: "num_sparse_features"
                                +}
                                +input_arg {
                                +  name: "sparse_feature_values"
                                +  description: "a list of vectors which contains feature value\nassociated with each feature group."
                                +  type: DT_FLOAT
                                +  number_attr: "num_sparse_features_with_values"
                                +}
                                +input_arg {
                                +  name: "dense_features"
                                +  description: "a list of matrices which contains the dense feature values."
                                +  type: DT_FLOAT
                                +  number_attr: "num_dense_features"
                                +}
                                +input_arg {
                                +  name: "example_weights"
                                +  description: "a vector which contains the weight associated with each\nexample."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "example_labels"
                                +  description: "a vector which contains the label/target associated with each\nexample."
                                +  type: DT_FLOAT
                                +}
                                +input_arg {
                                +  name: "sparse_indices"
                                +  description: "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. This field maybe omitted for the\ndense approach."
                                +  type: DT_INT64
                                +  number_attr: "num_sparse_features"
                                +}
                                +input_arg {
                                +  name: "sparse_weights"
                                +  description: "a list of vectors where each value is the weight associated with\na sparse feature group."
                                +  type: DT_FLOAT
                                +  number_attr: "num_sparse_features"
                                +}
                                +input_arg {
                                +  name: "dense_weights"
                                +  description: "a list of vectors where the values are the weights associated\nwith a dense feature group."
                                +  type: DT_FLOAT
                                +  number_attr: "num_dense_features"
                                +}
                                +input_arg {
                                +  name: "example_state_data"
                                +  description: "a list of vectors containing the example state data."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "out_example_state_data"
                                +  description: "a list of vectors containing the updated example state\ndata."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "out_delta_sparse_weights"
                                +  description: "a list of vectors where each value is the delta\nweights associated with a sparse feature group."
                                +  type: DT_FLOAT
                                +  number_attr: "num_sparse_features"
                                +}
                                +output_arg {
                                +  name: "out_delta_dense_weights"
                                +  description: "a list of vectors where the values are the delta\nweights associated with a dense feature group."
                                +  type: DT_FLOAT
                                +  number_attr: "num_dense_features"
                                +}
                                +attr {
                                +  name: "loss_type"
                                +  type: "string"
                                +  description: "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses."
                                +  allowed_values {
                                +    list {
                                +      s: "logistic_loss"
                                +      s: "squared_loss"
                                +      s: "hinge_loss"
                                +      s: "smooth_hinge_loss"
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "adaptative"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Whether to use Adapative SDCA for the inner loop."
                                +}
                                +attr {
                                +  name: "num_sparse_features"
                                +  type: "int"
                                +  description: "Number of sparse feature groups to train on."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "num_sparse_features_with_values"
                                +  type: "int"
                                +  description: "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "num_dense_features"
                                +  type: "int"
                                +  description: "Number of dense feature groups to train on."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "l1"
                                +  type: "float"
                                +  description: "Symmetric l1 regularization strength."
                                +}
                                +attr {
                                +  name: "l2"
                                +  type: "float"
                                +  description: "Symmetric l2 regularization strength."
                                +}
                                +attr {
                                +  name: "num_loss_partitions"
                                +  type: "int"
                                +  description: "Number of partitions of the global loss function."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "num_inner_iterations"
                                +  type: "int"
                                +  description: "Number of iterations per mini-batch."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Applies L1 regularization shrink step on the parameters.
                                +
                                +sdcaShrinkL1 :: forall m' . (MonadBuild m') => 
                                +                Float -- ^ __l1__: Symmetric l1 regularization strength.
                                +                -> Float -- ^ __l2__: Symmetric l2 regularization strength. Should be a positive float.
                                +                -> [Tensor Ref Float] -- ^ __weights__: a list of vectors where each value is the weight associated with a
                                +                                      -- feature group.
                                +                -> m' (ControlNode)
                                +sdcaShrinkL1 = sdcaShrinkL1' id
                                +sdcaShrinkL1' :: forall m' . (MonadBuild m') => OpParams ->
                                +                 Float -- ^ __l1__: Symmetric l1 regularization strength.
                                +                 -> Float -- ^ __l2__: Symmetric l2 regularization strength. Should be a positive float.
                                +                 -> [Tensor Ref Float] -- ^ __weights__: a list of vectors where each value is the weight associated with a
                                +                                       -- feature group.
                                +                 -> m' (ControlNode)
                                +sdcaShrinkL1' op'options l1 l2
                                +              weights | eqLengthGuard [("num_features", [("weights", length weights)])] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs weights]
                                +        buildOp [] (opDef "SdcaShrinkL1"
                                +                    & opAttr "l1" .~ l1
                                +                    & opAttr "l2" .~ l2
                                +                    & opAttr "num_features" .~ num_features
                                +                    & op'options & opInputs .~ op'inputs)
                                +  where
                                +    num_features = fromIntegral (length weights) :: Int64
                                +{-
                                +input_arg {
                                +  name: "weights"
                                +  description: "a list of vectors where each value is the weight associated with a\nfeature group."
                                +  type: DT_FLOAT
                                +  number_attr: "num_features"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "num_features"
                                +  type: "int"
                                +  description: "Number of feature groups to apply shrinking step."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "l1"
                                +  type: "float"
                                +  description: "Symmetric l1 regularization strength."
                                +}
                                +attr {
                                +  name: "l2"
                                +  type: "float"
                                +  description: "Symmetric l2 regularization strength. Should be a positive float."
                                +}
                                +-}
                                +
                                +-- | Computes the maximum along segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- Computes a tensor such that
                                +-- \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
                                +-- that `segment_ids[j] == i`.
                                +-- 
                                +-- If the max is empty for a given segment ID `i`, `output[i] = 0`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
                                +-- </div>
                                +segmentMax :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16,
                                +                                                   Data.Int.Int32,
                                +                                                   Data.Int.Int64,
                                +                                                   Data.Int.Int8,
                                +                                                   Data.Word.Word16,
                                +                                                   Data.Word.Word8, Double,
                                +                                                   Float] t,
                                +                                           OneOf '[Data.Int.Int32,
                                +                                                   Data.Int.Int64] tindices) => 
                                +              Tensor v'1 t -- ^ __data__
                                +              -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                     -- first dimension.  Values should be sorted and can be repeated.
                                +              -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +              -- has size `k`, the number of segments.
                                +segmentMax = segmentMax' id
                                +segmentMax' :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Int.Int64,
                                +                                                    Data.Int.Int8,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8, Double,
                                +                                                    Float] t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tindices) =>
                                +               OpParams ->
                                +               Tensor v'1 t -- ^ __data__
                                +               -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                      -- first dimension.  Values should be sorted and can be repeated.
                                +               -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +               -- has size `k`, the number of segments.
                                +segmentMax' op'options data' segment_ids | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs segment_ids]
                                +        return (opDef "SegmentMax"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the mean along segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- Computes a tensor such that
                                +-- \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
                                +-- over `j` such that `segment_ids[j] == i` and `N` is the total number of
                                +-- values summed.
                                +-- 
                                +-- If the mean is empty for a given segment ID `i`, `output[i] = 0`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
                                +-- </div>
                                +segmentMean :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Int.Int64,
                                +                                                    Data.Int.Int8,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8, Double,
                                +                                                    Float] t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tindices) =>
                                +               
                                +               Tensor v'1 t -- ^ __data__
                                +               -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                      -- first dimension.  Values should be sorted and can be repeated.
                                +               -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +               -- has size `k`, the number of segments.
                                +segmentMean = segmentMean' id
                                +segmentMean' :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t,
                                +                                             OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] tindices) =>
                                +                OpParams ->
                                +                Tensor v'1 t -- ^ __data__
                                +                -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                       -- first dimension.  Values should be sorted and can be repeated.
                                +                -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                -- has size `k`, the number of segments.
                                +segmentMean' op'options data' segment_ids | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs segment_ids]
                                +        return (opDef "SegmentMean"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the minimum along segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- Computes a tensor such that
                                +-- \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
                                +-- that `segment_ids[j] == i`.
                                +-- 
                                +-- If the min is empty for a given segment ID `i`, `output[i] = 0`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
                                +-- </div>
                                +segmentMin :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16,
                                +                                                   Data.Int.Int32,
                                +                                                   Data.Int.Int64,
                                +                                                   Data.Int.Int8,
                                +                                                   Data.Word.Word16,
                                +                                                   Data.Word.Word8, Double,
                                +                                                   Float] t,
                                +                                           OneOf '[Data.Int.Int32,
                                +                                                   Data.Int.Int64] tindices) => 
                                +              Tensor v'1 t -- ^ __data__
                                +              -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                     -- first dimension.  Values should be sorted and can be repeated.
                                +              -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +              -- has size `k`, the number of segments.
                                +segmentMin = segmentMin' id
                                +segmentMin' :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Int.Int64,
                                +                                                    Data.Int.Int8,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8, Double,
                                +                                                    Float] t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tindices) =>
                                +               OpParams ->
                                +               Tensor v'1 t -- ^ __data__
                                +               -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                      -- first dimension.  Values should be sorted and can be repeated.
                                +               -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +               -- has size `k`, the number of segments.
                                +segmentMin' op'options data' segment_ids | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs segment_ids]
                                +        return (opDef "SegmentMin"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the product along segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- Computes a tensor such that
                                +-- \\(output_i = \prod_j data_j\\) where the product is over `j` such
                                +-- that `segment_ids[j] == i`.
                                +-- 
                                +-- If the product is empty for a given segment ID `i`, `output[i] = 1`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
                                +-- </div>
                                +segmentProd :: forall v'1 v'2 t
                                +               tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                   Data.Int.Int32, Data.Int.Int64,
                                +                                   Data.Int.Int8, Data.Word.Word16,
                                +                                   Data.Word.Word8, Double, Float] t,
                                +                           OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => 
                                +               Tensor v'1 t -- ^ __data__
                                +               -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                      -- first dimension.  Values should be sorted and can be repeated.
                                +               -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +               -- has size `k`, the number of segments.
                                +segmentProd = segmentProd' id
                                +segmentProd' :: forall v'1 v'2 t
                                +                tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t, OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +                OpParams ->
                                +                Tensor v'1 t -- ^ __data__
                                +                -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                       -- first dimension.  Values should be sorted and can be repeated.
                                +                -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                -- has size `k`, the number of segments.
                                +segmentProd' op'options data' segment_ids | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs segment_ids]
                                +        return (opDef "SegmentProd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the sum along segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- Computes a tensor such that
                                +-- \\(output_i = \sum_j data_j\\) where sum is over `j` such
                                +-- that `segment_ids[j] == i`.
                                +-- 
                                +-- If the sum is empty for a given segment ID `i`, `output[i] = 0`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
                                +-- </div>
                                +segmentSum :: forall v'1 v'2 t
                                +              tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                  Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                  Data.Word.Word16, Data.Word.Word8, Double,
                                +                                  Float] t, OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tindices) =>
                                +              
                                +              Tensor v'1 t -- ^ __data__
                                +              -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                     -- first dimension.  Values should be sorted and can be repeated.
                                +              -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +              -- has size `k`, the number of segments.
                                +segmentSum = segmentSum' id
                                +segmentSum' :: forall v'1 v'2 t
                                +               tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                   Data.Int.Int32, Data.Int.Int64,
                                +                                   Data.Int.Int8, Data.Word.Word16,
                                +                                   Data.Word.Word8, Double, Float] t,
                                +                           OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) =>
                                +               OpParams ->
                                +               Tensor v'1 t -- ^ __data__
                                +               -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                      -- first dimension.  Values should be sorted and can be repeated.
                                +               -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +               -- has size `k`, the number of segments.
                                +segmentSum' op'options data' segment_ids | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs segment_ids]
                                +        return (opDef "SegmentSum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Selects elements from `t` or `e`, depending on `condition`.
                                +--
                                +-- The `t`, and `e` tensors must all have the same shape, and the
                                +-- output will also have that shape.
                                +-- 
                                +-- The `condition` tensor must be a scalar if `t` and `e` are scalars.
                                +-- If `t` and `e` are vectors or higher rank, then `condition` must be either a
                                +-- scalar, a vector with size matching the first dimension of `t`, or must have
                                +-- the same shape as `t`.
                                +-- 
                                +-- The `condition` tensor acts as a mask that chooses, based on the value at each
                                +-- element, whether the corresponding element / row in the output should be
                                +-- taken from `t` (if true) or `e` (if false).
                                +-- 
                                +-- If `condition` is a vector and `t` and `e` are higher rank matrices, then
                                +-- it chooses which row (outer dimension) to copy from `t` and `e`.
                                +-- If `condition` has the same shape as `t` and `e`, then it chooses which
                                +-- element to copy from `t` and `e`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```python
                                +-- # 'condition' tensor is [[True,  False]
                                +-- #                        [False, True]]
                                +-- # 't' is [[1, 2],
                                +-- #         [3, 4]]
                                +-- # 'e' is [[5, 6],
                                +-- #         [7, 8]]
                                +-- select(condition, t, e)  # => [[1, 6], [7, 4]]
                                +-- 
                                +-- 
                                +-- # 'condition' tensor is [True, False]
                                +-- # 't' is [[1, 2],
                                +-- #         [3, 4]]
                                +-- # 'e' is [[5, 6],
                                +-- #         [7, 8]]
                                +-- select(condition, t, e) ==> [[1, 2],
                                +--                              [7, 8]]
                                +-- 
                                +-- ```
                                +select :: forall v'1 v'2 v'3 t . (TensorType t) => 
                                +          Tensor v'1 Bool -- ^ __condition__
                                +          -> Tensor v'2 t -- ^ __t__: = A `Tensor` which may have the same shape as `condition`.
                                +                          -- If `condition` is rank 1, `t` may have higher rank,
                                +                          -- but its first dimension must match the size of `condition`.
                                +          -> Tensor v'3 t -- ^ __e__: = A `Tensor` with the same type and shape as `t`.
                                +          -> Tensor Build t -- ^ __output__: = A `Tensor` with the same type and shape as `t` and `e`.
                                +select = select' id
                                +select' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams ->
                                +           Tensor v'1 Bool -- ^ __condition__
                                +           -> Tensor v'2 t -- ^ __t__: = A `Tensor` which may have the same shape as `condition`.
                                +                           -- If `condition` is rank 1, `t` may have higher rank,
                                +                           -- but its first dimension must match the size of `condition`.
                                +           -> Tensor v'3 t -- ^ __e__: = A `Tensor` with the same type and shape as `t`.
                                +           -> Tensor Build t -- ^ __output__: = A `Tensor` with the same type and shape as `t` and `e`.
                                +select' op'options condition t e | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs condition,
                                +                                                             buildInputs t,
                                +                                                             buildInputs e]
                                +        return (opDef "Select"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "condition" type: DT_BOOL }
                                +input_arg {
                                +  name: "t"
                                +  description: "= A `Tensor` which may have the same shape as `condition`.\nIf `condition` is rank 1, `t` may have higher rank,\nbut its first dimension must match the size of `condition`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "e"
                                +  description: "= A `Tensor` with the same type and shape as `t`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "= A `Tensor` with the same type and shape as `t` and `e`."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
                                +--
                                +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
                                +-- form square matrices, with the same constraints as the single matrix
                                +-- SelfAdjointEig.
                                +-- 
                                +-- The result is a [..., M+1, M] matrix with [..., 0,:] containing the
                                +-- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
                                +selfAdjointEig :: forall v'1 t . (OneOf '[Double, Float] t) => 
                                +                  Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`.
                                +                  -> Tensor Build t -- ^ __output__: Shape is `[..., M+1, M]`.
                                +selfAdjointEig = selfAdjointEig' id
                                +selfAdjointEig' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams ->
                                +                   Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`.
                                +                   -> Tensor Build t -- ^ __output__: Shape is `[..., M+1, M]`.
                                +selfAdjointEig' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "SelfAdjointEig"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "Shape is `[..., M, M]`." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Shape is `[..., M+1, M]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
                                +}
                                +-}
                                +
                                +-- | Computes the eigen decomposition of one or more square self-adjoint matrices.
                                +--
                                +-- Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
                                +-- `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
                                +-- 
                                +-- ```python
                                +-- # a is a tensor.
                                +-- # e is a tensor of eigenvalues.
                                +-- # v is a tensor of eigenvectors.
                                +-- e, v = self_adjoint_eig(a)
                                +-- e = self_adjoint_eig(a, compute_v=False)
                                +-- ```
                                +selfAdjointEigV2 :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Double, Float] t) => 
                                +                    Tensor v'1 t -- ^ __input__: `Tensor` input of shape `[N, N]`.
                                +                    -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__)
                                +                    --
                                +                    -- * __e__: Eigenvalues. Shape is `[N]`.
                                +                    --
                                +                    -- * __v__: Eigenvectors. Shape is `[N, N]`.
                                +selfAdjointEigV2 = selfAdjointEigV2' id
                                +selfAdjointEigV2' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float),
                                +                                             Double, Float] t) => OpParams ->
                                +                     Tensor v'1 t -- ^ __input__: `Tensor` input of shape `[N, N]`.
                                +                     -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__)
                                +                     --
                                +                     -- * __e__: Eigenvalues. Shape is `[N]`.
                                +                     --
                                +                     -- * __v__: Eigenvectors. Shape is `[N, N]`.
                                +selfAdjointEigV2' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "SelfAdjointEigV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "`Tensor` input of shape `[N, N]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "e"
                                +  description: "Eigenvalues. Shape is `[N]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "v"
                                +  description: "Eigenvectors. Shape is `[N, N]`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "compute_v"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_DOUBLE
                                +      type: DT_FLOAT
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.
                                +--
                                +-- The `SparseTensor` must have rank `R` greater than 1, and the first dimension
                                +-- is treated as the minibatch dimension.  Elements of the `SparseTensor`
                                +-- must be sorted in increasing order of this first dimension.  The serialized
                                +-- `SparseTensor` objects going into each row of `serialized_sparse` will have
                                +-- rank `R-1`.
                                +-- 
                                +-- The minibatch size `N` is extracted from `sparse_shape[0]`.
                                +serializeManySparse :: forall v'1 v'2 v'3 t . (TensorType t) => 
                                +                       Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
                                +                       -> Tensor v'2 t -- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
                                +                       -> Tensor Build Data.ByteString.ByteString -- ^ __serialized_sparse__
                                +serializeManySparse = serializeManySparse' id
                                +serializeManySparse' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams ->
                                +                        Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
                                +                        -> Tensor v'2 t -- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
                                +                        -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
                                +                        -> Tensor Build Data.ByteString.ByteString -- ^ __serialized_sparse__
                                +serializeManySparse' op'options sparse_indices sparse_values
                                +                     sparse_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices,
                                +                                                             buildInputs sparse_values,
                                +                                                             buildInputs sparse_shape]
                                +        return (opDef "SerializeManySparse"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sparse_indices"
                                +  description: "2-D.  The `indices` of the minibatch `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sparse_values"
                                +  description: "1-D.  The `values` of the minibatch `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "sparse_shape"
                                +  description: "1-D.  The `shape` of the minibatch `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "serialized_sparse" type: DT_STRING }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.
                                +
                                +serializeSparse :: forall v'1 v'2 v'3 t . (TensorType t) => 
                                +                   Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.
                                +                   -> Tensor v'2 t -- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.
                                +                   -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.
                                +                   -> Tensor Build Data.ByteString.ByteString -- ^ __serialized_sparse__
                                +serializeSparse = serializeSparse' id
                                +serializeSparse' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams ->
                                +                    Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.
                                +                    -> Tensor v'2 t -- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.
                                +                    -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.
                                +                    -> Tensor Build Data.ByteString.ByteString -- ^ __serialized_sparse__
                                +serializeSparse' op'options sparse_indices sparse_values
                                +                 sparse_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices,
                                +                                                             buildInputs sparse_values,
                                +                                                             buildInputs sparse_shape]
                                +        return (opDef "SerializeSparse"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sparse_indices"
                                +  description: "2-D.  The `indices` of the `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sparse_values"
                                +  description: "1-D.  The `values` of the `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "sparse_shape"
                                +  description: "1-D.  The `shape` of the `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "serialized_sparse" type: DT_STRING }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Number of unique elements along last dimension of input `set`.
                                +--
                                +-- Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
                                +-- and `set_shape`. The last dimension contains values in a set, duplicates are
                                +-- allowed but ignored.
                                +-- 
                                +-- If `validate_indices` is `True`, this op validates the order and range of `set`
                                +-- indices.
                                +setSize :: forall v'1 v'2 v'3 t . (OneOf '[Data.ByteString.ByteString,
                                +                                           Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16,
                                +                                           Data.Word.Word8] t) => 
                                +           Tensor v'1 Data.Int.Int64 -- ^ __set_indices__: 2D `Tensor`, indices of a `SparseTensor`.
                                +           -> Tensor v'2 t -- ^ __set_values__: 1D `Tensor`, values of a `SparseTensor`.
                                +           -> Tensor v'3 Data.Int.Int64 -- ^ __set_shape__: 1D `Tensor`, shape of a `SparseTensor`.
                                +           -> Tensor Build Data.Int.Int32 -- ^ __size__: For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
                                +           -- `n-1` dimensions as `set`. Each value is the number of unique elements in
                                +           -- the corresponding `[0...n-1]` dimension of `set`.
                                +setSize = setSize' id
                                +setSize' :: forall v'1 v'2 v'3 t . (OneOf '[Data.ByteString.ByteString,
                                +                                            Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16,
                                +                                            Data.Word.Word8] t) => OpParams ->
                                +            Tensor v'1 Data.Int.Int64 -- ^ __set_indices__: 2D `Tensor`, indices of a `SparseTensor`.
                                +            -> Tensor v'2 t -- ^ __set_values__: 1D `Tensor`, values of a `SparseTensor`.
                                +            -> Tensor v'3 Data.Int.Int64 -- ^ __set_shape__: 1D `Tensor`, shape of a `SparseTensor`.
                                +            -> Tensor Build Data.Int.Int32 -- ^ __size__: For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
                                +            -- `n-1` dimensions as `set`. Each value is the number of unique elements in
                                +            -- the corresponding `[0...n-1]` dimension of `set`.
                                +setSize' op'options set_indices set_values set_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set_indices,
                                +                                                             buildInputs set_values,
                                +                                                             buildInputs set_shape]
                                +        return (opDef "SetSize"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "set_indices"
                                +  description: "2D `Tensor`, indices of a `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "set_values"
                                +  description: "1D `Tensor`, values of a `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "set_shape"
                                +  description: "1D `Tensor`, shape of a `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st\n`n-1` dimensions as `set`. Each value is the number of unique elements in\nthe corresponding `[0...n-1]` dimension of `set`."
                                +  type: DT_INT32
                                +}
                                +attr {
                                +  name: "validate_indices" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_STRING
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the shape of a tensor.
                                +--
                                +-- This operation returns a 1-D integer tensor representing the shape of `input`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
                                +-- shape(t) ==> [2, 2, 3]
                                +-- ```
                                +shape :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] out_type) =>
                                +         
                                +         Tensor v'1 t -- ^ __input__
                                +         -> Tensor Build out_type -- ^ __output__
                                +shape = shape' id
                                +shape' :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int64] out_type) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __input__
                                +          -> Tensor Build out_type -- ^ __output__
                                +shape' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Shape"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "out_type" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Returns shape of tensors.
                                +--
+-- This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
                                +shapeN :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int64] out_type) =>
                                +          
                                +          [Tensor v'1 t] -- ^ __input__
                                +          -> [Tensor Build out_type] -- ^ __output__
                                +shapeN = shapeN' id
                                +shapeN' :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                          Data.Int.Int64] out_type) =>
                                +           OpParams ->
                                +           [Tensor v'1 t] -- ^ __input__
                                +           -> [Tensor Build out_type] -- ^ __output__
                                +shapeN' op'options input | eqLengthGuard [("N", [("input", length input)])] =
                                +    pureOp [n] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "ShapeN"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length input) :: Int64
                                +{-
                                +input_arg { name: "input" type_attr: "T" number_attr: "N" }
                                +output_arg {
                                +  name: "output" type_attr: "out_type" number_attr: "N"
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Generate a sharded filename. The filename is printf formatted as
                                +--
                                +--    %s-%05d-of-%05d, basename, shard, num_shards.
                                +shardedFilename :: 
                                +                   Tensor v'1 Data.ByteString.ByteString -- ^ __basename__
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __shard__
                                +                   -> Tensor v'3 Data.Int.Int32 -- ^ __num_shards__
                                +                   -> Tensor Build Data.ByteString.ByteString -- ^ __filename__
                                +shardedFilename = shardedFilename' id
                                +shardedFilename' :: OpParams ->
                                +                    Tensor v'1 Data.ByteString.ByteString -- ^ __basename__
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __shard__
                                +                    -> Tensor v'3 Data.Int.Int32 -- ^ __num_shards__
                                +                    -> Tensor Build Data.ByteString.ByteString -- ^ __filename__
                                +shardedFilename' op'options basename shard num_shards | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs basename,
                                +                                                             buildInputs shard,
                                +                                                             buildInputs num_shards]
                                +        return (opDef "ShardedFilename"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "basename" type: DT_STRING }
                                +input_arg { name: "shard" type: DT_INT32 }
                                +input_arg { name: "num_shards" type: DT_INT32 }
                                +output_arg { name: "filename" type: DT_STRING }
                                +-}
                                +
                                +-- | Generate a glob pattern matching all sharded file names.
                                +
                                +shardedFilespec :: 
                                +                   Tensor v'1 Data.ByteString.ByteString -- ^ __basename__
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __num_shards__
                                +                   -> Tensor Build Data.ByteString.ByteString -- ^ __filename__
                                +shardedFilespec = shardedFilespec' id
                                +shardedFilespec' :: OpParams ->
                                +                    Tensor v'1 Data.ByteString.ByteString -- ^ __basename__
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __num_shards__
                                +                    -> Tensor Build Data.ByteString.ByteString -- ^ __filename__
                                +shardedFilespec' op'options basename num_shards | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs basename,
                                +                                                             buildInputs num_shards]
                                +        return (opDef "ShardedFilespec"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "basename" type: DT_STRING }
                                +input_arg { name: "num_shards" type: DT_INT32 }
                                +output_arg { name: "filename" type: DT_STRING }
                                +-}
                                +
                                +-- | Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
                                +
                                +shuffleDataset :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => 
                                +                  [DataType] -- ^ __output_types__
                                +                  -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                  -> Tensor v'2 Data.Int.Int64 -- ^ __buffer_size__: The number of output elements to buffer in an iterator over
                                +                                               -- this dataset. Compare with the `min_after_dequeue` attr when creating a
                                +                                               -- `RandomShuffleQueue`.
                                +                  -> Tensor v'3 Data.Int.Int64 -- ^ __seed__: A scalar seed for the random number generator. If either seed or
                                +                                               -- seed2 is set to be non-zero, the random number generator is seeded
                                +                                               -- by the given seed.  Otherwise, a random seed is used.
                                +                  -> Tensor v'4 Data.Int.Int64 -- ^ __seed2__: A second scalar seed to avoid seed collision.
                                +                  -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +shuffleDataset = shuffleDataset' id
                                +shuffleDataset' :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => OpParams ->
                                +                   [DataType] -- ^ __output_types__
                                +                   -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                   -> Tensor v'2 Data.Int.Int64 -- ^ __buffer_size__: The number of output elements to buffer in an iterator over
                                +                                                -- this dataset. Compare with the `min_after_dequeue` attr when creating a
                                +                                                -- `RandomShuffleQueue`.
                                +                   -> Tensor v'3 Data.Int.Int64 -- ^ __seed__: A scalar seed for the random number generator. If either seed or
                                +                                                -- seed2 is set to be non-zero, the random number generator is seeded
                                +                                                -- by the given seed.  Otherwise, a random seed is used.
                                +                   -> Tensor v'4 Data.Int.Int64 -- ^ __seed2__: A second scalar seed to avoid seed collision.
                                +                   -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +shuffleDataset' op'options output_types input_dataset buffer_size seed
                                +                seed2 | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs buffer_size,
                                +                                                             buildInputs seed,
                                +                                                             buildInputs seed2]
                                +        buildOp [] (opDef "ShuffleDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +input_arg {
                                +  name: "buffer_size"
                                +  description: "The number of output elements to buffer in an iterator over\nthis dataset. Compare with the `min_after_dequeue` attr when creating a\n`RandomShuffleQueue`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "seed"
                                +  description: "A scalar seed for the random number generator. If either seed or\nseed2 is set to be non-zero, the random number generator is seeded\nby the given seed.  Otherwise, a random seed is used."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "seed2"
                                +  description: "A second scalar seed to avoid seed collision."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Computes sigmoid of `x` element-wise.
                                +--
                                +-- Specifically, `y = 1 / (1 + exp(-x))`.
                                +sigmoid :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float),
                                +                                   Data.Word.Word16, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor Build t -- ^ __y__
                                +sigmoid = sigmoid' id
                                +sigmoid' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Word.Word16, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor Build t -- ^ __y__
                                +sigmoid' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Sigmoid"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient of the sigmoid of `x` wrt its input.
                                +--
                                +-- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
                                +-- `dy` is the corresponding input gradient.
                                +sigmoidGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                           (Data.Complex.Complex Float),
                                +                                           Data.Word.Word16, Double,
                                +                                           Float] t) => 
                                +               Tensor v'1 t -- ^ __x__
                                +               -> Tensor v'2 t -- ^ __y__
                                +               -> Tensor Build t -- ^ __z__
                                +sigmoidGrad = sigmoidGrad' id
                                +sigmoidGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Data.Word.Word16, Double,
                                +                                            Float] t) => OpParams ->
                                +                Tensor v'1 t -- ^ __x__
                                +                -> Tensor v'2 t -- ^ __y__
                                +                -> Tensor Build t -- ^ __z__
                                +sigmoidGrad' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "SigmoidGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns an element-wise indication of the sign of a number.
                                +--
                                +-- `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
                                +-- 
                                +-- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
                                +sign :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                Data.Int.Int64, Data.Word.Word16, Double,
                                +                                Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +sign = sign' id
                                +sign' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                 Data.Int.Int64, Data.Word.Word16, Double,
                                +                                 Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +sign' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Sign"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes sin of x element-wise.
                                +
                                +sin :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Word.Word16,
                                +                               Double, Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +sin = sin' id
                                +sin' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                Double, Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +sin' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Sin"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes hyperbolic sine of x element-wise.
                                +
                                +sinh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +sinh = sinh' id
                                +sinh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +sinh' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Sinh"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the size of a tensor.
                                +--
                                +-- This operation returns an integer representing the number of elements in
                                +-- `input`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
+-- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
                                +-- size(t) ==> 12
                                +-- ```
                                +size :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] out_type) =>
                                +        
                                +        Tensor v'1 t -- ^ __input__
                                +        -> Tensor Build out_type -- ^ __output__
                                +size = size' id
                                +size' :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] out_type) =>
                                +         OpParams ->
                                +         Tensor v'1 t -- ^ __input__
                                +         -> Tensor Build out_type -- ^ __output__
                                +size' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Size"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "out_type" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that skips `count` elements from the `input_dataset`.
                                +
                                +skipDataset :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +               [DataType] -- ^ __output_types__
                                +               -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +               -> Tensor v'2 Data.Int.Int64 -- ^ __count__: A scalar representing the number of elements from the `input_dataset`
                                +                                            -- that should be skipped.  If count is -1, skips everything.
                                +               -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +skipDataset = skipDataset' id
                                +skipDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                [DataType] -- ^ __output_types__
                                +                -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                -> Tensor v'2 Data.Int.Int64 -- ^ __count__: A scalar representing the number of elements from the `input_dataset`
                                +                                             -- that should be skipped.  If count is -1, skips everything.
                                +                -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +skipDataset' op'options output_types input_dataset count | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs count]
                                +        buildOp [] (opDef "SkipDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +input_arg {
                                +  name: "count"
                                +  description: "A scalar representing the number of elements from the `input_dataset`\nthat should be skipped.  If count is -1, skips everything."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Parses a text file and creates a batch of examples.
                                +
                                +skipgram :: forall m' . (MonadBuild m') => 
                                +            Data.Int.Int64 -- ^ __batch_size__: The size of produced batch.
                                +            -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                    Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64,
                                +                    Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64,
                                +                    Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32))
                                +            -- ^ (__vocab_word__, __vocab_freq__, __words_per_epoch__, __current_epoch__, __total_words_processed__, __examples__, __labels__)
                                +            --
                                +            -- * __vocab_word__: A vector of words in the corpus.
                                +            --
                                +            -- * __vocab_freq__: Frequencies of words. Sorted in the non-ascending order.
                                +            --
                                +            -- * __words_per_epoch__: Number of words per epoch in the data file.
                                +            --
                                +            -- * __current_epoch__: The current epoch number.
                                +            --
                                +            -- * __total_words_processed__: The total number of words processed so far.
                                +            --
                                +            -- * __examples__: A vector of word ids.
                                +            --
                                +            -- * __labels__: A vector of word ids.
                                +skipgram = skipgram' id
                                +skipgram' :: forall m' . (MonadBuild m') => OpParams ->
                                +             Data.Int.Int64 -- ^ __batch_size__: The size of produced batch.
                                +             -> m' ((Tensor Value Data.ByteString.ByteString,
                                +                     Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64,
                                +                     Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64,
                                +                     Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32))
                                +             -- ^ (__vocab_word__, __vocab_freq__, __words_per_epoch__, __current_epoch__, __total_words_processed__, __examples__, __labels__)
                                +             --
                                +             -- * __vocab_word__: A vector of words in the corpus.
                                +             --
                                +             -- * __vocab_freq__: Frequencies of words. Sorted in the non-ascending order.
                                +             --
                                +             -- * __words_per_epoch__: Number of words per epoch in the data file.
                                +             --
                                +             -- * __current_epoch__: The current epoch number.
                                +             --
                                +             -- * __total_words_processed__: The total number of words processed so far.
                                +             --
                                +             -- * __examples__: A vector of word ids.
                                +             --
                                +             -- * __labels__: A vector of word ids.
                                +skipgram' op'options batch_size | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "Skipgram"
                                +                    & opAttr "batch_size" .~ batch_size
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "vocab_word"
                                +  description: "A vector of words in the corpus."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "vocab_freq"
                                +  description: "Frequencies of words. Sorted in the non-ascending order."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "words_per_epoch"
                                +  description: "Number of words per epoch in the data file."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "current_epoch"
                                +  description: "The current epoch number."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "total_words_processed"
                                +  description: "The total number of words processed so far."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "examples"
                                +  description: "A vector of word ids."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "labels" description: "A vector of word ids." type: DT_INT32
                                +}
                                +attr {
                                +  name: "filename"
                                +  type: "string"
                                +  description: "The corpus\'s text file name."
                                +}
                                +attr {
                                +  name: "batch_size"
                                +  type: "int"
                                +  description: "The size of produced batch."
                                +}
                                +attr {
                                +  name: "window_size"
                                +  type: "int"
                                +  default_value { i: 5 }
                                +  description: "The number of words to predict to the left and right of the target."
                                +}
                                +attr {
                                +  name: "min_count"
                                +  type: "int"
                                +  default_value { i: 5 }
                                +  description: "The minimum number of word occurrences for it to be included in the\nvocabulary."
                                +}
                                +attr {
                                +  name: "subsample"
                                +  type: "float"
                                +  default_value { f: 1.0e-3 }
                                +  description: "Threshold for word occurrence. Words that appear with higher\nfrequency will be randomly down-sampled. Set to 0 to disable."
                                +}
                                +-}
                                +
                                +-- | Return a slice from 'input'.
                                +--
                                +-- The output tensor is a tensor with dimensions described by 'size'
                                +-- whose values are extracted from 'input' starting at the offsets in
                                +-- 'begin'.
                                +-- 
                                +-- *Requirements*:
                                +--   0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
                                +slice :: forall v'1 v'2 v'3 t index . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] index) =>
                                +         
                                +         Tensor v'1 t -- ^ __input__
                                +         -> Tensor v'2 index -- ^ __begin__: begin[i] specifies the offset into the 'i'th dimension of
                                +                             -- 'input' to slice from.
                                +         -> Tensor v'3 index -- ^ __size__: size[i] specifies the number of elements of the 'i'th dimension
                                +                             -- of 'input' to slice. If size[i] is -1, all remaining elements in dimension
                                +                             -- i are included in the slice (i.e. this is equivalent to setting
                                +                             -- size[i] = input.dim_size(i) - begin[i]).
                                +         -> Tensor Build t -- ^ __output__
                                +slice = slice' id
                                +slice' :: forall v'1 v'2 v'3 t index . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] index) =>
                                +          OpParams ->
                                +          Tensor v'1 t -- ^ __input__
                                +          -> Tensor v'2 index -- ^ __begin__: begin[i] specifies the offset into the 'i'th dimension of
                                +                              -- 'input' to slice from.
                                +          -> Tensor v'3 index -- ^ __size__: size[i] specifies the number of elements of the 'i'th dimension
                                +                              -- of 'input' to slice. If size[i] is -1, all remaining elements in dimension
                                +                              -- i are included in the slice (i.e. this is equivalent to setting
                                +                              -- size[i] = input.dim_size(i) - begin[i]).
                                +          -> Tensor Build t -- ^ __output__
                                +slice' op'options input begin size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs begin,
                                +                                                             buildInputs size]
                                +        return (opDef "Slice"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Index" .~ tensorType (undefined :: index)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg {
                                +  name: "begin"
                                +  description: "begin[i] specifies the offset into the \'i\'th dimension of\n\'input\' to slice from."
                                +  type_attr: "Index"
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "size[i] specifies the number of elements of the \'i\'th dimension\nof \'input\' to slice. If size[i] is -1, all remaining elements in dimension\ni are included in the slice (i.e. this is equivalent to setting\nsize[i] = input.dim_size(i) - begin[i])."
                                +  type_attr: "Index"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Index"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes softmax activations.
                                +--
                                +-- For each batch `i` and class `j` we have
                                +-- 
                                +--     softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
                                +softmax :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => 
                                +           Tensor v'1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.
                                +           -> Tensor Build t -- ^ __softmax__: Same shape as `logits`.
                                +softmax = softmax' id
                                +softmax' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) =>
                                +            OpParams ->
                                +            Tensor v'1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.
                                +            -> Tensor Build t -- ^ __softmax__: Same shape as `logits`.
                                +softmax' op'options logits | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits]
                                +        return (opDef "Softmax"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "logits"
                                +  description: "2-D with shape `[batch_size, num_classes]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "softmax"
                                +  description: "Same shape as `logits`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes softmax cross entropy cost and gradients to backpropagate.
                                +--
                                +-- Inputs are the logits, not probabilities.
                                +softmaxCrossEntropyWithLogits :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16,
                                +                                                             Double,
                                +                                                             Float] t) => 
                                +                                 Tensor v'1 t -- ^ __features__: batch_size x num_classes matrix
                                +                                 -> Tensor v'2 t -- ^ __labels__: batch_size x num_classes matrix
                                +                                                 -- The caller must ensure that each batch of labels represents a valid
                                +                                                 -- probability distribution.
                                +                                 -> (Tensor Build t, Tensor Build t)
                                +                                 -- ^ (__loss__, __backprop__)
                                +                                 --
                                +                                 -- * __loss__: Per example loss (batch_size vector).
                                +                                 --
                                +                                 -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).
                                +softmaxCrossEntropyWithLogits = softmaxCrossEntropyWithLogits' id
                                +softmaxCrossEntropyWithLogits' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16,
                                +                                                              Double,
                                +                                                              Float] t) =>
                                +                                  OpParams ->
                                +                                  Tensor v'1 t -- ^ __features__: batch_size x num_classes matrix
                                +                                  -> Tensor v'2 t -- ^ __labels__: batch_size x num_classes matrix
                                +                                                  -- The caller must ensure that each batch of labels represents a valid
                                +                                                  -- probability distribution.
                                +                                  -> (Tensor Build t, Tensor Build t)
                                +                                  -- ^ (__loss__, __backprop__)
                                +                                  --
                                +                                  -- * __loss__: Per example loss (batch_size vector).
                                +                                  --
                                +                                  -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).
                                +softmaxCrossEntropyWithLogits' op'options features labels | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features,
                                +                                                             buildInputs labels]
                                +        return (opDef "SoftmaxCrossEntropyWithLogits"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "features"
                                +  description: "batch_size x num_classes matrix"
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "labels"
                                +  description: "batch_size x num_classes matrix\nThe caller must ensure that each batch of labels represents a valid\nprobability distribution."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "loss"
                                +  description: "Per example loss (batch_size vector)."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "backprop"
                                +  description: "backpropagated gradients (batch_size x num_classes matrix)."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes softplus: `log(exp(features) + 1)`.
                                +
                                +softplus :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => 
                                +            Tensor v'1 t -- ^ __features__
                                +            -> Tensor Build t -- ^ __activations__
                                +softplus = softplus' id
                                +softplus' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __features__
                                +             -> Tensor Build t -- ^ __activations__
                                +softplus' op'options features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features]
                                +        return (opDef "Softplus"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "features" type_attr: "T" }
                                +output_arg { name: "activations" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes softplus gradients for a softplus operation.
                                +
                                +softplusGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t) => 
                                +                Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softplus operation.
                                +                -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding softplus operation.
                                +                -> Tensor Build t -- ^ __backprops__: The gradients: `gradients / (1 + exp(-features))`.
                                +softplusGrad = softplusGrad' id
                                +softplusGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t) => OpParams ->
                                +                 Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softplus operation.
                                +                 -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding softplus operation.
                                +                 -> Tensor Build t -- ^ __backprops__: The gradients: `gradients / (1 + exp(-features))`.
                                +softplusGrad' op'options gradients features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients,
                                +                                                             buildInputs features]
                                +        return (opDef "SoftplusGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "gradients"
                                +  description: "The backpropagated gradients to the corresponding softplus operation."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "features"
                                +  description: "The features passed as input to the corresponding softplus operation."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "backprops"
                                +  description: "The gradients: `gradients / (1 + exp(-features))`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes softsign: `features / (abs(features) + 1)`.
                                +
                                +softsign :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => 
                                +            Tensor v'1 t -- ^ __features__
                                +            -> Tensor Build t -- ^ __activations__
                                +softsign = softsign' id
                                +softsign' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => OpParams ->
                                +             Tensor v'1 t -- ^ __features__
                                +             -> Tensor Build t -- ^ __activations__
                                +softsign' op'options features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features]
                                +        return (opDef "Softsign"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "features" type_attr: "T" }
                                +output_arg { name: "activations" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes softsign gradients for a softsign operation.
                                +
                                +softsignGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t) => 
                                +                Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softsign operation.
                                +                -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding softsign operation.
                                +                -> Tensor Build t -- ^ __backprops__: The gradients: `gradients / (1 + abs(-features)) ** 2`.
                                +softsignGrad = softsignGrad' id
                                +softsignGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t) => OpParams ->
                                +                 Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softsign operation.
                                +                 -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding softsign operation.
                                +                 -> Tensor Build t -- ^ __backprops__: The gradients: `gradients / (1 + abs(-features)) ** 2`.
                                +softsignGrad' op'options gradients features | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients,
                                +                                                             buildInputs features]
                                +        return (opDef "SoftsignGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "gradients"
                                +  description: "The backpropagated gradients to the corresponding softsign operation."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "features"
                                +  description: "The features passed as input to the corresponding softsign operation."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "backprops"
                                +  description: "The gradients: `gradients / (1 + abs(-features)) ** 2`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | SpaceToBatch for 4-D tensors of type T.
                                +--
                                +-- This is a legacy version of the more general SpaceToBatchND.
                                +-- 
                                +-- Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
                                +-- More specifically, this op outputs a copy of the input tensor where values from
                                +-- the `height` and `width` dimensions are moved to the `batch` dimension. After
                                +-- the zero-padding, both `height` and `width` of the input must be divisible by the
                                +-- block size.
                                +spaceToBatch :: forall v'1 v'2 t tpaddings . (TensorType t,
                                +                                              OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tpaddings) =>
                                +                
                                +                Data.Int.Int64 -- ^ __block_size__
                                +                -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, depth]`.
                                +                -> Tensor v'2 tpaddings -- ^ __paddings__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
                                +                                        --   the padding of the input with zeros across the spatial dimensions as follows:
                                +                                        -- 
                                +                                        --       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
                                +                                        -- 
                                +                                        --   The effective spatial dimensions of the zero-padded input tensor will be:
                                +                                        -- 
                                +                                        --       height_pad = pad_top + height + pad_bottom
                                +                                        --       width_pad = pad_left + width + pad_right
                                +                                        -- 
                                +                                        -- The attr `block_size` must be greater than one. It indicates the block size.
                                +                                        -- 
                                +                                        --   * Non-overlapping blocks of size `block_size x block size` in the height and
                                +                                        --     width dimensions are rearranged into the batch dimension at each location.
                                +                                        --   * The batch of the output tensor is `batch * block_size * block_size`.
                                +                                        --   * Both height_pad and width_pad must be divisible by block_size.
                                +                                        -- 
                                +                                        -- The shape of the output will be:
                                +                                        -- 
                                +                                        --     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
                                +                                        --      depth]
                                +                                        -- 
                                +                                        -- Some examples:
                                +                                        -- 
                                +                                        -- (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1], [2]], [[3], [4]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- The output tensor has shape `[4, 1, 1, 1]` and value:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1, 2, 3], [4, 5, 6]],
                                +                                        --       [[7, 8, 9], [10, 11, 12]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- The output tensor has shape `[4, 1, 1, 3]` and value:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1],   [2],  [3],  [4]],
                                +                                        --       [[5],   [6],  [7],  [8]],
                                +                                        --       [[9],  [10], [11],  [12]],
                                +                                        --       [[13], [14], [15],  [16]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- The output tensor has shape `[4, 2, 2, 1]` and value:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1], [3]], [[9], [11]]],
                                +                                        --      [[[2], [4]], [[10], [12]]],
                                +                                        --      [[[5], [7]], [[13], [15]]],
                                +                                        --      [[[6], [8]], [[14], [16]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1],   [2],  [3],  [4]],
                                +                                        --       [[5],   [6],  [7],  [8]]],
                                +                                        --      [[[9],  [10], [11],  [12]],
                                +                                        --       [[13], [14], [15],  [16]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- The output tensor has shape `[8, 1, 2, 1]` and value:
                                +                                        -- 
                                +                                        -- ```
                                +                                        -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
                                +                                        --      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
                                +                                        -- ```
                                +                                        -- 
                                +                                        -- Among others, this operation is useful for reducing atrous convolution into
                                +                                        -- regular convolution.
                                +                -> Tensor Build t -- ^ __output__
                                +spaceToBatch = spaceToBatch' id
                                +spaceToBatch' :: forall v'1 v'2 t tpaddings . (TensorType t,
                                +                                               OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tpaddings) =>
                                +                 OpParams ->
                                +                 Data.Int.Int64 -- ^ __block_size__
                                +                 -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, depth]`.
                                +                 -> Tensor v'2 tpaddings -- ^ __paddings__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
                                +                                         --   the padding of the input with zeros across the spatial dimensions as follows:
                                +                                         -- 
                                +                                         --       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
                                +                                         -- 
                                +                                         --   The effective spatial dimensions of the zero-padded input tensor will be:
                                +                                         -- 
                                +                                         --       height_pad = pad_top + height + pad_bottom
                                +                                         --       width_pad = pad_left + width + pad_right
                                +                                         -- 
                                +                                         -- The attr `block_size` must be greater than one. It indicates the block size.
                                +                                         -- 
                                +                                         --   * Non-overlapping blocks of size `block_size x block size` in the height and
                                +                                         --     width dimensions are rearranged into the batch dimension at each location.
                                +                                         --   * The batch of the output tensor is `batch * block_size * block_size`.
                                +                                         --   * Both height_pad and width_pad must be divisible by block_size.
                                +                                         -- 
                                +                                         -- The shape of the output will be:
                                +                                         -- 
                                +                                         --     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
                                +                                         --      depth]
                                +                                         -- 
                                +                                         -- Some examples:
                                +                                         -- 
                                +                                         -- (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
                                +                                         -- 
                                +                                         -- ```
                                +                                         -- x = [[[[1], [2]], [[3], [4]]]]
                                +                                         -- ```
                                +                                         -- 
                                +                                         -- The output tensor has shape `[4, 1, 1, 1]` and value:
                                +                                         -- 
                                +                                         -- ```
                                +                                         -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
                                +                                         -- ```
                                +                                         -- 
                                +                                         -- (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
                                +                                         -- 
                                +                                         -- ```
                                +                                         -- x = [[[[1, 2, 3], [4, 5, 6]],
                                +                                         --       [[7, 8, 9], [10, 11, 12]]]]
                                +                                         -- ```
                                +                                         -- 
                                +                                         -- The output tensor has shape `[4, 1, 1, 3]` and value:
                                +                                         -- 
                                +                                         -- ```
                                +                                         -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
                                +                                         -- ```
                                +                                         -- 
                                +                                         -- (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
                                +                                         -- 
                                +                                         -- ```
                                +                                         -- x = [[[[1],   [2],  [3],  [4]],
                                +                                         --       [[5],   [6],  [7],  [8]],
                                +                                         --       [[9],  [10], [11],  [12]],
                                +                                         --       [[13], [14], [15],  [16]]]]
                                +                                         -- ```
                                +                                         -- 
                                +                                         -- The output tensor has shape `[4, 2, 2, 1]` and value:
                                +                                         -- 
                                +                                         -- ```
                                +                                         -- x = [[[[1], [3]], [[9], [11]]],
                                +                                         --      [[[2], [4]], [[10], [12]]],
                                +                                         --      [[[5], [7]], [[13], [15]]],
                                +                                         --      [[[6], [8]], [[14], [16]]]]
                                +                                         -- ```
                                +                                         -- 
                                +                                         -- (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
                                +                                         -- 
                                +                                         -- ```
                                +                                         -- x = [[[[1],   [2],  [3],  [4]],
                                +                                         --       [[5],   [6],  [7],  [8]]],
                                +                                         --      [[[9],  [10], [11],  [12]],
                                +                                         --       [[13], [14], [15],  [16]]]]
                                +                                         -- ```
                                +                                         -- 
                                +                                         -- The output tensor has shape `[8, 1, 2, 1]` and value:
                                +                                         -- 
                                +                                         -- ```
                                +                                         -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
                                +                                         --      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
                                +                                         -- ```
                                +                                         -- 
                                +                                         -- Among others, this operation is useful for reducing atrous convolution into
                                +                                         -- regular convolution.
                                +                 -> Tensor Build t -- ^ __output__
                                +spaceToBatch' op'options block_size input paddings | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs paddings]
                                +        return (opDef "SpaceToBatch"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings)
                                +                & opAttr "block_size" .~ block_size
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "4-D with shape `[batch, height, width, depth]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "paddings"
                                +  description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\n  the padding of the input with zeros across the spatial dimensions as follows:\n\n      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\n\n  The effective spatial dimensions of the zero-padded input tensor will be:\n\n      height_pad = pad_top + height + pad_bottom\n      width_pad = pad_left + width + pad_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\n  * Non-overlapping blocks of size `block_size x block size` in the height and\n    width dimensions are rearranged into the batch dimension at each location.\n  * The batch of the output tensor is `batch * block_size * block_size`.\n  * Both height_pad and width_pad must be divisible by block_size.\n\nThe shape of the output will be:\n\n    [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n     depth]\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\n\n```\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]],\n      [[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\n\n```\nx 
= [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 2, 1]` and value:\n\n```\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution."
                                +  type_attr: "Tpaddings"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tpaddings"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "block_size" type: "int" has_minimum: true minimum: 2
                                +}
                                +-}
                                +
                                +-- | SpaceToBatch for N-D tensors of type T.
                                +--
                                +-- This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
                                +-- grid of blocks of shape `block_shape`, and interleaves these blocks with the
                                +-- "batch" dimension (0) such that in the output, the spatial dimensions
                                +-- `[1, ..., M]` correspond to the position within the grid, and the batch
                                +-- dimension combines both the position within a spatial block and the original
                                +-- batch position.  Prior to division into blocks, the spatial dimensions of the
                                +-- input are optionally zero padded according to `paddings`.  See below for a
                                +-- precise description.
                                +spaceToBatchND :: forall v'1 v'2 v'3 t tblock_shape tpaddings . (TensorType t,
                                +                                                                 OneOf '[Data.Int.Int32,
                                +                                                                         Data.Int.Int64] tblock_shape,
                                +                                                                 OneOf '[Data.Int.Int32,
                                +                                                                         Data.Int.Int64] tpaddings) =>
                                +                  
                                +                  Tensor v'1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
                                +                               -- where spatial_shape has `M` dimensions.
                                +                  -> Tensor v'2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1.
                                +                  -> Tensor v'3 tpaddings -- ^ __paddings__: 2-D with shape `[M, 2]`, all values must be >= 0.
                                +                                          --   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
                                +                                          --   `i + 1`, which corresponds to spatial dimension `i`.  It is required that
                                +                                          --   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
                                +                                          -- 
                                +                                          -- This operation is equivalent to the following steps:
                                +                                          -- 
                                +                                          -- 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
                                +                                          --    input according to `paddings` to produce `padded` of shape `padded_shape`.
                                +                                          -- 
                                +                                          -- 2. Reshape `padded` to `reshaped_padded` of shape:
                                +                                          -- 
                                +                                          --      [batch] +
                                +                                          --      [padded_shape[1] / block_shape[0],
                                +                                          --        block_shape[0],
                                +                                          --       ...,
                                +                                          --       padded_shape[M] / block_shape[M-1],
                                +                                          --       block_shape[M-1]] +
                                +                                          --      remaining_shape
                                +                                          -- 
                                +                                          -- 3. Permute dimensions of `reshaped_padded` to produce
                                +                                          --    `permuted_reshaped_padded` of shape:
                                +                                          -- 
                                +                                          --      block_shape +
                                +                                          --      [batch] +
                                +                                          --      [padded_shape[1] / block_shape[0],
                                +                                          --       ...,
                                +                                          --       padded_shape[M] / block_shape[M-1]] +
                                +                                          --      remaining_shape
                                +                                          -- 
                                +                                          -- 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
                                +                                          --    dimension, producing an output tensor of shape:
                                +                                          -- 
                                +                                          --      [batch * prod(block_shape)] +
                                +                                          --      [padded_shape[1] / block_shape[0],
                                +                                          --       ...,
                                +                                          --       padded_shape[M] / block_shape[M-1]] +
                                +                                          --      remaining_shape
                                +                                          -- 
                                +                                          -- Some examples:
                                +                                          -- 
                                +                                          -- (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
                                +                                          --     `paddings = [[0, 0], [0, 0]]`:
                                +                                          -- 
                                +                                          -- ```
                                +                                          -- x = [[[[1], [2]], [[3], [4]]]]
                                +                                          -- ```
                                +                                          -- 
                                +                                          -- The output tensor has shape `[4, 1, 1, 1]` and value:
                                +                                          -- 
                                +                                          -- ```
                                +                                          -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
                                +                                          -- ```
                                +                                          -- 
                                +                                          -- (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
                                +                                          --     `paddings = [[0, 0], [0, 0]]`:
                                +                                          -- 
                                +                                          -- ```
                                +                                          -- x = [[[[1, 2, 3], [4, 5, 6]],
                                +                                          --       [[7, 8, 9], [10, 11, 12]]]]
                                +                                          -- ```
                                +                                          -- 
                                +                                          -- The output tensor has shape `[4, 1, 1, 3]` and value:
                                +                                          -- 
                                +                                          -- ```
                                +                                          -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
                                +                                          -- ```
                                +                                          -- 
                                +                                          -- (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
                                +                                          --     `paddings = [[0, 0], [0, 0]]`:
                                +                                          -- 
                                +                                          -- ```
                                +                                          -- x = [[[[1],   [2],  [3],  [4]],
                                +                                          --       [[5],   [6],  [7],  [8]],
                                +                                          --       [[9],  [10], [11],  [12]],
                                +                                          --       [[13], [14], [15],  [16]]]]
                                +                                          -- ```
                                +                                          -- 
                                +                                          -- The output tensor has shape `[4, 2, 2, 1]` and value:
                                +                                          -- 
                                +                                          -- ```
                                +                                          -- x = [[[[1], [3]], [[9], [11]]],
                                +                                          --      [[[2], [4]], [[10], [12]]],
                                +                                          --      [[[5], [7]], [[13], [15]]],
                                +                                          --      [[[6], [8]], [[14], [16]]]]
                                +                                          -- ```
                                +                                          -- 
                                +                                          -- (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
                                +                                          --     paddings = `[[0, 0], [2, 0]]`:
                                +                                          -- 
                                +                                          -- ```
                                +                                          -- x = [[[[1],   [2],  [3],  [4]],
                                +                                          --       [[5],   [6],  [7],  [8]]],
                                +                                          --      [[[9],  [10], [11],  [12]],
                                +                                          --       [[13], [14], [15],  [16]]]]
                                +                                          -- ```
                                +                                          -- 
                                +                                          -- The output tensor has shape `[8, 1, 3, 1]` and value:
                                +                                          -- 
                                +                                          -- ```
                                +                                          -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
                                +                                          --      [[[0], [2], [4]]], [[[0], [10], [12]]],
                                +                                          --      [[[0], [5], [7]]], [[[0], [13], [15]]],
                                +                                          --      [[[0], [6], [8]]], [[[0], [14], [16]]]]
                                +                                          -- ```
                                +                                          -- 
                                +                                          -- Among others, this operation is useful for reducing atrous convolution into
                                +                                          -- regular convolution.
                                +                  -> Tensor Build t -- ^ __output__
                                +spaceToBatchND = spaceToBatchND' id
                                +spaceToBatchND' :: forall v'1 v'2 v'3 t tblock_shape tpaddings . (TensorType t,
                                +                                                                  OneOf '[Data.Int.Int32,
                                +                                                                          Data.Int.Int64] tblock_shape,
                                +                                                                  OneOf '[Data.Int.Int32,
                                +                                                                          Data.Int.Int64] tpaddings) =>
                                +                   OpParams ->
                                +                   Tensor v'1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
                                +                                -- where spatial_shape has `M` dimensions.
                                +                   -> Tensor v'2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1.
                                +                   -> Tensor v'3 tpaddings -- ^ __paddings__: 2-D with shape `[M, 2]`, all values must be >= 0.
                                +                                           --   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
                                +                                           --   `i + 1`, which corresponds to spatial dimension `i`.  It is required that
                                +                                           --   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
                                +                                           -- 
                                +                                           -- This operation is equivalent to the following steps:
                                +                                           -- 
                                +                                           -- 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
                                +                                           --    input according to `paddings` to produce `padded` of shape `padded_shape`.
                                +                                           -- 
                                +                                           -- 2. Reshape `padded` to `reshaped_padded` of shape:
                                +                                           -- 
                                +                                           --      [batch] +
                                +                                           --      [padded_shape[1] / block_shape[0],
                                +                                           --        block_shape[0],
                                +                                           --       ...,
                                +                                           --       padded_shape[M] / block_shape[M-1],
                                +                                           --       block_shape[M-1]] +
                                +                                           --      remaining_shape
                                +                                           -- 
                                +                                           -- 3. Permute dimensions of `reshaped_padded` to produce
                                +                                           --    `permuted_reshaped_padded` of shape:
                                +                                           -- 
                                +                                           --      block_shape +
                                +                                           --      [batch] +
                                +                                           --      [padded_shape[1] / block_shape[0],
                                +                                           --       ...,
                                +                                           --       padded_shape[M] / block_shape[M-1]] +
                                +                                           --      remaining_shape
                                +                                           -- 
                                +                                           -- 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
                                +                                           --    dimension, producing an output tensor of shape:
                                +                                           -- 
                                +                                           --      [batch * prod(block_shape)] +
                                +                                           --      [padded_shape[1] / block_shape[0],
                                +                                           --       ...,
                                +                                           --       padded_shape[M] / block_shape[M-1]] +
                                +                                           --      remaining_shape
                                +                                           -- 
                                +                                           -- Some examples:
                                +                                           -- 
                                +                                           -- (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
                                +                                           --     `paddings = [[0, 0], [0, 0]]`:
                                +                                           -- 
                                +                                           -- ```
                                +                                           -- x = [[[[1], [2]], [[3], [4]]]]
                                +                                           -- ```
                                +                                           -- 
                                +                                           -- The output tensor has shape `[4, 1, 1, 1]` and value:
                                +                                           -- 
                                +                                           -- ```
                                +                                           -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
                                +                                           -- ```
                                +                                           -- 
                                +                                           -- (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
                                +                                           --     `paddings = [[0, 0], [0, 0]]`:
                                +                                           -- 
                                +                                           -- ```
                                +                                           -- x = [[[[1, 2, 3], [4, 5, 6]],
                                +                                           --       [[7, 8, 9], [10, 11, 12]]]]
                                +                                           -- ```
                                +                                           -- 
                                +                                           -- The output tensor has shape `[4, 1, 1, 3]` and value:
                                +                                           -- 
                                +                                           -- ```
                                +                                           -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
                                +                                           -- ```
                                +                                           -- 
                                +                                           -- (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
                                +                                           --     `paddings = [[0, 0], [0, 0]]`:
                                +                                           -- 
                                +                                           -- ```
                                +                                           -- x = [[[[1],   [2],  [3],  [4]],
                                +                                           --       [[5],   [6],  [7],  [8]],
                                +                                           --       [[9],  [10], [11],  [12]],
                                +                                           --       [[13], [14], [15],  [16]]]]
                                +                                           -- ```
                                +                                           -- 
                                +                                           -- The output tensor has shape `[4, 2, 2, 1]` and value:
                                +                                           -- 
                                +                                           -- ```
                                +                                           -- x = [[[[1], [3]], [[9], [11]]],
                                +                                           --      [[[2], [4]], [[10], [12]]],
                                +                                           --      [[[5], [7]], [[13], [15]]],
                                +                                           --      [[[6], [8]], [[14], [16]]]]
                                +                                           -- ```
                                +                                           -- 
                                +                                           -- (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
                                +                                           --     paddings = `[[0, 0], [2, 0]]`:
                                +                                           -- 
                                +                                           -- ```
                                +                                           -- x = [[[[1],   [2],  [3],  [4]],
                                +                                           --       [[5],   [6],  [7],  [8]]],
                                +                                           --      [[[9],  [10], [11],  [12]],
                                +                                           --       [[13], [14], [15],  [16]]]]
                                +                                           -- ```
                                +                                           -- 
                                +                                           -- The output tensor has shape `[8, 1, 3, 1]` and value:
                                +                                           -- 
                                +                                           -- ```
                                +                                           -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
                                +                                           --      [[[0], [2], [4]]], [[[0], [10], [12]]],
                                +                                           --      [[[0], [5], [7]]], [[[0], [13], [15]]],
                                +                                           --      [[[0], [6], [8]]], [[[0], [14], [16]]]]
                                +                                           -- ```
                                +                                           -- 
                                +                                           -- Among others, this operation is useful for reducing atrous convolution into
                                +                                           -- regular convolution.
                                +                   -> Tensor Build t -- ^ __output__
                                +spaceToBatchND' op'options input block_shape paddings | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs block_shape,
                                +                                                             buildInputs paddings]
                                +        return (opDef "SpaceToBatchND"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape)
                                +                & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has `M` dimensions."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "block_shape"
                                +  description: "1-D with shape `[M]`, all values must be >= 1."
                                +  type_attr: "Tblock_shape"
                                +}
                                +input_arg {
                                +  name: "paddings"
                                +  description: "2-D with shape `[M, 2]`, all values must be >= 0.\n  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n  `i + 1`, which corresponds to spatial dimension `i`.  It is required that\n  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n\nThis operation is equivalent to the following steps:\n\n1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n   input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n2. Reshape `padded` to `reshaped_padded` of shape:\n\n     [batch] +\n     [padded_shape[1] / block_shape[0],\n       block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1],\n      block_shape[M-1]] +\n     remaining_shape\n\n3. Permute dimensions of `reshaped_padded` to produce\n   `permuted_reshaped_padded` of shape:\n\n     block_shape +\n     [batch] +\n     [padded_shape[1] / block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1]] +\n     remaining_shape\n\n4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n   dimension, producing an output tensor of shape:\n\n     [batch * prod(block_shape)] +\n     [padded_shape[1] / block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1]] +\n     remaining_shape\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]],\n      [[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n    paddings = `[[0, 0], [2, 0]]`:\n\n```\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 3, 1]` and value:\n\n```\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution."
                                +  type_attr: "Tpaddings"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tblock_shape"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "Tpaddings"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | SpaceToDepth for tensors of type T.
                                +--
                                +-- Rearranges blocks of spatial data, into depth. More specifically,
                                +-- this op outputs a copy of the input tensor where values from the `height`
                                +-- and `width` dimensions are moved to the `depth` dimension.
                                +-- The attr `block_size` indicates the input block size and how the data is moved.
                                +-- 
                                 +--   * Non-overlapping blocks of size `block_size x block_size` are rearranged
                                +--     into depth at each location.
                                +--   * The depth of the output tensor is `input_depth * block_size * block_size`.
                                +--   * The input tensor's height and width must be divisible by block_size.
                                +-- 
                                +-- That is, assuming the input is in the shape:
                                +-- `[batch, height, width, depth]`,
                                +-- the shape of the output will be:
                                +-- `[batch, height/block_size, width/block_size, depth*block_size*block_size]`
                                +-- 
                                +-- This operation requires that the input tensor be of rank 4, and that
                                +-- `block_size` be >=1 and a divisor of both the input `height` and `width`.
                                +-- 
                                +-- This operation is useful for resizing the activations between convolutions
                                +-- (but keeping all data), e.g. instead of pooling. It is also useful for training
                                +-- purely convolutional models.
                                +-- 
                                +-- For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:
                                +-- 
                                +-- ```
                                +-- x = [[[[1], [2]],
                                +--       [[3], [4]]]]
                                +-- ```
                                +-- 
                                +-- This operation will output a tensor of shape `[1, 1, 1, 4]`:
                                +-- 
                                +-- ```
                                +-- [[[[1, 2, 3, 4]]]]
                                +-- ```
                                +-- 
                                +-- Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
                                +-- the corresponding output will have a single element (i.e. width and height are
                                +-- both 1) and will have a depth of 4 channels (1 * block_size * block_size).
                                +-- The output element shape is `[1, 1, 4]`.
                                +-- 
                                +-- For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
                                +-- 
                                +-- ```
                                +-- x = [[[[1, 2, 3], [4, 5, 6]],
                                +--       [[7, 8, 9], [10, 11, 12]]]]
                                +-- ```
                                +-- 
                                +-- This operation, for block_size of 2, will return the following tensor of shape
                                +-- `[1, 1, 1, 12]`
                                +-- 
                                +-- ```
                                +-- [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
                                +-- ```
                                +-- 
                                +-- Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:
                                +-- 
                                +-- ```
                                +-- x = [[[[1],   [2],  [5],  [6]],
                                +--       [[3],   [4],  [7],  [8]],
                                +--       [[9],  [10], [13],  [14]],
                                +--       [[11], [12], [15],  [16]]]]
                                +-- ```
                                +-- 
                                +-- the operator will return the following tensor of shape `[1 2 2 4]`:
                                +-- 
                                +-- ```
                                +-- x = [[[[1, 2, 3, 4],
                                +--        [5, 6, 7, 8]],
                                +--       [[9, 10, 11, 12],
                                +--        [13, 14, 15, 16]]]]
                                +-- ```
                                 +spaceToDepth :: forall v'1 t . (TensorType t) => 
                                 +                Data.Int.Int64 -- ^ __block_size__: The size of the spatial block.
                                 +                -> Tensor v'1 t -- ^ __input__
                                 +                -> Tensor Build t -- ^ __output__
                                 +spaceToDepth = spaceToDepth' id
                                 +-- | Variant of 'spaceToDepth' that additionally accepts 'OpParams', a
                                 +-- modifier applied to the generated 'OpDef' (see the op'options
                                 +-- application below).
                                 +spaceToDepth' :: forall v'1 t . (TensorType t) => OpParams ->
                                 +                 Data.Int.Int64 -- ^ __block_size__: The size of the spatial block.
                                 +                 -> Tensor v'1 t -- ^ __input__
                                 +                 -> Tensor Build t -- ^ __output__
                                 +spaceToDepth' op'options block_size input | eqLengthGuard [] =
                                 +    pureOp [] $ do
                                 +        -- Single tensor input; attrs "T" (element type) and "block_size".
                                 +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                 +        return (opDef "SpaceToDepth"
                                 +                & opAttr "T" .~ tensorType (undefined :: t)
                                 +                & opAttr "block_size" .~ block_size
                                 +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "block_size"
                                +  type: "int"
                                +  description: "The size of the spatial block."
                                +  has_minimum: true
                                +  minimum: 2
                                +}
                                +-}
                                +
                                +-- | Applies a sparse gradient to a given accumulator.
                                +--
                                +-- Does not add if local_step is smaller than the accumulator's
                                +-- global_step.
                                 +sparseAccumulatorApplyGradient :: forall v'2 v'3 v'4 v'5 dtype
                                 +                                  m' . (MonadBuild m',
                                 +                                        OneOf '[(Data.Complex.Complex Double),
                                 +                                                (Data.Complex.Complex Float),
                                 +                                                Data.Int.Int16, Data.Int.Int32,
                                 +                                                Data.Int.Int64, Data.Int.Int8,
                                 +                                                Data.Word.Word16,
                                 +                                                Data.Word.Word8, Double,
                                 +                                                Float] dtype) => 
                                 +                                  Bool -- ^ __has_known_shape__: Boolean indicating whether gradient_shape is unknown, in which
                                 +                                       -- case the input is ignored during validation.
                                 +                                  -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
                                 +                                  -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the sparse gradient was computed.
                                 +                                  -> Tensor v'3 Data.Int.Int64 -- ^ __gradient_indices__: Indices of the sparse gradient to be accumulated. Must be a
                                 +                                                               -- vector.
                                 +                                  -> Tensor v'4 dtype -- ^ __gradient_values__: Values are the non-zero slices of the gradient, and must have
                                 +                                                      -- the same first dimension as indices, i.e., the nnz represented by indices and
                                 +                                                      -- values must be consistent.
                                 +                                  -> Tensor v'5 Data.Int.Int64 -- ^ __gradient_shape__: Shape of the sparse gradient to be accumulated.
                                 +                                  -> m' (ControlNode)
                                 +sparseAccumulatorApplyGradient = sparseAccumulatorApplyGradient' id
                                 +-- | Variant of 'sparseAccumulatorApplyGradient' that additionally accepts
                                 +-- 'OpParams', a modifier applied to the generated 'OpDef'.
                                 +sparseAccumulatorApplyGradient' :: forall v'2 v'3 v'4 v'5 dtype
                                 +                                   m' . (MonadBuild m',
                                 +                                         OneOf '[(Data.Complex.Complex Double),
                                 +                                                 (Data.Complex.Complex Float),
                                 +                                                 Data.Int.Int16, Data.Int.Int32,
                                 +                                                 Data.Int.Int64, Data.Int.Int8,
                                 +                                                 Data.Word.Word16,
                                 +                                                 Data.Word.Word8, Double,
                                 +                                                 Float] dtype) => OpParams ->
                                 +                                   Bool -- ^ __has_known_shape__: Boolean indicating whether gradient_shape is unknown, in which
                                 +                                        -- case the input is ignored during validation.
                                 +                                   -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
                                 +                                   -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the sparse gradient was computed.
                                 +                                   -> Tensor v'3 Data.Int.Int64 -- ^ __gradient_indices__: Indices of the sparse gradient to be accumulated. Must be a
                                 +                                                                -- vector.
                                 +                                   -> Tensor v'4 dtype -- ^ __gradient_values__: Values are the non-zero slices of the gradient, and must have
                                 +                                                       -- the same first dimension as indices, i.e., the nnz represented by indices and
                                 +                                                       -- values must be consistent.
                                 +                                   -> Tensor v'5 Data.Int.Int64 -- ^ __gradient_shape__: Shape of the sparse gradient to be accumulated.
                                 +                                   -> m' (ControlNode)
                                 +sparseAccumulatorApplyGradient' op'options has_known_shape handle local_step
                                 +                                gradient_indices gradient_values
                                 +                                gradient_shape | eqLengthGuard [] =
                                 +    build $ do
                                 +        -- Inputs are concatenated in the op's declared order; buildOp [] is
                                 +        -- called with no tensor outputs, so the result is a 'ControlNode'.
                                 +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                 +                                                             buildInputs local_step,
                                 +                                                             buildInputs gradient_indices,
                                 +                                                             buildInputs gradient_values,
                                 +                                                             buildInputs gradient_shape]
                                 +        buildOp [] (opDef "SparseAccumulatorApplyGradient"
                                 +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                 +                    & opAttr "has_known_shape" .~ has_known_shape
                                 +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a accumulator."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "local_step"
                                +  description: "The local_step value at which the sparse gradient was computed."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "gradient_indices"
                                +  description: "Indices of the sparse gradient to be accumulated. Must be a\nvector."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "gradient_values"
                                +  description: "Values are the non-zero slices of the gradient, and must have\nthe same first dimension as indices, i.e., the nnz represented by indices and\nvalues must be consistent."
                                +  type_attr: "dtype"
                                +}
                                +input_arg {
                                +  name: "gradient_shape"
                                +  description: "Shape of the sparse gradient to be accumulated."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "has_known_shape"
                                +  type: "bool"
                                +  description: "Boolean indicating whether gradient_shape is unknown, in which\ncase the input is ignored during validation."
                                +}
                                +-}
                                +
                                +-- | Extracts the average sparse gradient in a SparseConditionalAccumulator.
                                +--
                                 +-- The op blocks until sufficient (i.e., more than num_required)
                                +-- gradients have been accumulated. If the accumulator has already
                                +-- aggregated more than num_required gradients, it will return its
                                +-- average of the accumulated gradients.  Also automatically increments
                                +-- the recorded global_step in the accumulator by 1, and resets the
                                +-- aggregate to 0.
                                 +sparseAccumulatorTakeGradient :: forall v'2 dtype m' . (MonadBuild m',
                                 +                                                        OneOf '[(Data.Complex.Complex Double),
                                 +                                                                (Data.Complex.Complex Float),
                                 +                                                                Data.Int.Int16,
                                 +                                                                Data.Int.Int32,
                                 +                                                                Data.Int.Int64,
                                 +                                                                Data.Int.Int8,
                                 +                                                                Data.Word.Word16,
                                 +                                                                Data.Word.Word8,
                                 +                                                                Double,
                                 +                                                                Float] dtype) =>
                                 +                                 
                                 +                                 Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a SparseConditionalAccumulator.
                                 +                                 -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate.
                                 +                                 -> m' ((Tensor Value Data.Int.Int64,
                                 +                                         Tensor Value dtype,
                                 +                                         Tensor Value Data.Int.Int64))
                                 +                                 -- ^ (__indices__, __values__, __shape__)
                                 +                                 --
                                 +                                 -- * __indices__: Indices of the average of the accumulated sparse gradients.
                                 +                                 --
                                 +                                 -- * __values__: Values of the average of the accumulated sparse gradients.
                                 +                                 --
                                 +                                 -- * __shape__: Shape of the average of the accumulated sparse gradients.
                                 +sparseAccumulatorTakeGradient = sparseAccumulatorTakeGradient' id
                                 +-- | Variant of 'sparseAccumulatorTakeGradient' that additionally accepts
                                 +-- 'OpParams', a modifier applied to the generated 'OpDef'.
                                 +sparseAccumulatorTakeGradient' :: forall v'2 dtype m' . (MonadBuild m',
                                 +                                                         OneOf '[(Data.Complex.Complex Double),
                                 +                                                                 (Data.Complex.Complex Float),
                                 +                                                                 Data.Int.Int16,
                                 +                                                                 Data.Int.Int32,
                                 +                                                                 Data.Int.Int64,
                                 +                                                                 Data.Int.Int8,
                                 +                                                                 Data.Word.Word16,
                                 +                                                                 Data.Word.Word8,
                                 +                                                                 Double,
                                 +                                                                 Float] dtype) =>
                                 +                                  OpParams ->
                                 +                                  Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a SparseConditionalAccumulator.
                                 +                                  -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate.
                                 +                                  -> m' ((Tensor Value Data.Int.Int64,
                                 +                                          Tensor Value dtype,
                                 +                                          Tensor Value Data.Int.Int64))
                                 +                                  -- ^ (__indices__, __values__, __shape__)
                                 +                                  --
                                 +                                  -- * __indices__: Indices of the average of the accumulated sparse gradients.
                                 +                                  --
                                 +                                  -- * __values__: Values of the average of the accumulated sparse gradients.
                                 +                                  --
                                 +                                  -- * __shape__: Shape of the average of the accumulated sparse gradients.
                                 +sparseAccumulatorTakeGradient' op'options handle
                                 +                               num_required | eqLengthGuard [] =
                                 +    build $ do
                                 +        -- Two inputs (handle, num_required); per the signature, the op yields
                                 +        -- the (indices, values, shape) triple of the averaged gradient.
                                 +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                 +                                                             buildInputs num_required]
                                 +        buildOp [] (opDef "SparseAccumulatorTakeGradient"
                                 +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                 +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a SparseConditionalAccumulator."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "num_required"
                                +  description: "Number of gradients required before we return an aggregate."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "indices"
                                +  description: "Indices of the average of the accumulated sparse gradients."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "values"
                                +  description: "Values of the average of the accumulated sparse gradients."
                                +  type_attr: "dtype"
                                +}
                                +output_arg {
                                +  name: "shape"
                                +  description: "Shape of the average of the accumulated sparse gradients."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Adds two `SparseTensor` objects to produce another `SparseTensor`.
                                +--
                                +-- The input `SparseTensor` objects' indices are assumed ordered in standard
                                +-- lexicographic order.  If this is not the case, before this step run
                                +-- `SparseReorder` to restore index ordering.
                                +-- 
                                +-- By default, if two values sum to zero at some index, the output `SparseTensor`
                                +-- would still include that particular location in its index, storing a zero in the
                                +-- corresponding value slot.  To override this, callers can specify `thresh`,
                                +-- indicating that if the sum has a magnitude strictly smaller than `thresh`, its
                                +-- corresponding value and index would then not be included.  In particular,
                                +-- `thresh == 0` (default) means everything is kept and actual thresholding happens
                                +-- only for a positive value.
                                +-- 
                                +-- In the following shapes, `nnz` is the count after taking `thresh` into account.
                                +sparseAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t
                                +             treal . (OneOf '[(Data.Complex.Complex Double),
                                +                              (Data.Complex.Complex Float), Data.Int.Int16,
                                +                              Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                              Data.Word.Word16, Data.Word.Word8, Double,
                                +                              Float] t, OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                                Data.Int.Int64, Data.Int.Int8,
                                +                                                Data.Word.Word16,
                                +                                                Data.Word.Word8, Double,
                                +                                                Float] treal) => 
                                +             Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
                                +             -> Tensor v'2 t -- ^ __a_values__: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
                                +             -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
                                +             -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
                                +             -> Tensor v'5 t -- ^ __b_values__: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
                                +             -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
                                +             -> Tensor v'7 treal -- ^ __thresh__: 0-D.  The magnitude threshold that determines if an output value/index
                                +                                 -- pair takes space.
                                +             -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                 Tensor Build Data.Int.Int64)
                                +             -- ^ (__sum_indices__, __sum_values__, __sum_shape__)
                                +             --
                                +             -- * __sum_indices__
                                +             --
                                +             -- * __sum_values__
                                +             --
                                +             -- * __sum_shape__
                                +sparseAdd = sparseAdd' id
                                +sparseAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t
                                +              treal . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Int.Int16,
                                +                               Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                               Data.Word.Word16, Data.Word.Word8, Double,
                                +                               Float] t, OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                                 Data.Int.Int64, Data.Int.Int8,
                                +                                                 Data.Word.Word16,
                                +                                                 Data.Word.Word8, Double,
                                +                                                 Float] treal) => OpParams ->
                                +              Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
                                +              -> Tensor v'2 t -- ^ __a_values__: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
                                +              -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
                                +              -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
                                +              -> Tensor v'5 t -- ^ __b_values__: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
                                +              -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
                                +              -> Tensor v'7 treal -- ^ __thresh__: 0-D.  The magnitude threshold that determines if an output value/index
                                +                                  -- pair takes space.
                                +              -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                  Tensor Build Data.Int.Int64)
                                +              -- ^ (__sum_indices__, __sum_values__, __sum_shape__)
                                +              --
                                +              -- * __sum_indices__
                                +              --
                                +              -- * __sum_values__
                                +              --
                                +              -- * __sum_shape__
                                +sparseAdd' op'options a_indices a_values a_shape b_indices b_values b_shape
                                +           thresh | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices,
                                +                                                             buildInputs a_values,
                                +                                                             buildInputs a_shape,
                                +                                                             buildInputs b_indices,
                                +                                                             buildInputs b_values,
                                +                                                             buildInputs b_shape,
                                +                                                             buildInputs thresh]
                                +        return (opDef "SparseAdd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Treal" .~ tensorType (undefined :: treal)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "a_indices"
                                +  description: "2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "a_values"
                                +  description: "1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "a_shape"
                                +  description: "1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "b_indices"
                                +  description: "2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "b_values"
                                +  description: "1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "b_shape"
                                +  description: "1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "thresh"
                                +  description: "0-D.  The magnitude threshold that determines if an output value/index\npair takes space."
                                +  type_attr: "Treal"
                                +}
                                +output_arg { name: "sum_indices" type: DT_INT64 }
                                +output_arg { name: "sum_values" type_attr: "T" }
                                +output_arg { name: "sum_shape" type: DT_INT64 }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Treal"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | The gradient operator for the SparseAdd op.
                                +--
                                +-- The SparseAdd op calculates A + B, where A, B, and the sum are all represented
                                +-- as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
                                +-- non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
                                +-- values of A and B.
                                +sparseAddGrad :: forall v'1 v'2 v'3 v'4
                                +                 t . (OneOf '[(Data.Complex.Complex Double),
                                +                              (Data.Complex.Complex Float), Data.Int.Int16,
                                +                              Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                              Data.Word.Word16, Data.Word.Word8, Double,
                                +                              Float] t) => 
                                +                 Tensor v'1 t -- ^ __backprop_val_grad__: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
                                +                              -- the non-empty values of the sum.
                                +                 -> Tensor v'2 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
                                +                 -> Tensor v'3 Data.Int.Int64 -- ^ __b_indices__: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
                                +                 -> Tensor v'4 Data.Int.Int64 -- ^ __sum_indices__: 2-D.  The `indices` of the sum `SparseTensor`, size
                                +                                              -- `[nnz(sum), ndims]`.
                                +                 -> (Tensor Build t, Tensor Build t)
                                +                 -- ^ (__a_val_grad__, __b_val_grad__)
                                +                 --
                                +                 -- * __a_val_grad__: 1-D with shape `[nnz(A)]`. The gradient with respect to the
                                +                 -- non-empty values of A.
                                +                 --
                                +                 -- * __b_val_grad__: 1-D with shape `[nnz(B)]`. The gradient with respect to the
                                +                 -- non-empty values of B.
                                +sparseAddGrad = sparseAddGrad' id
                                +sparseAddGrad' :: forall v'1 v'2 v'3 v'4
                                +                  t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Int.Int16,
                                +                               Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                               Data.Word.Word16, Data.Word.Word8, Double,
                                +                               Float] t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __backprop_val_grad__: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
                                +                               -- the non-empty values of the sum.
                                +                  -> Tensor v'2 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
                                +                  -> Tensor v'3 Data.Int.Int64 -- ^ __b_indices__: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
                                +                  -> Tensor v'4 Data.Int.Int64 -- ^ __sum_indices__: 2-D.  The `indices` of the sum `SparseTensor`, size
                                +                                               -- `[nnz(sum), ndims]`.
                                +                  -> (Tensor Build t, Tensor Build t)
                                +                  -- ^ (__a_val_grad__, __b_val_grad__)
                                +                  --
                                +                  -- * __a_val_grad__: 1-D with shape `[nnz(A)]`. The gradient with respect to the
                                +                  -- non-empty values of A.
                                +                  --
                                +                  -- * __b_val_grad__: 1-D with shape `[nnz(B)]`. The gradient with respect to the
                                +                  -- non-empty values of B.
                                +sparseAddGrad' op'options backprop_val_grad a_indices b_indices
                                +               sum_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs backprop_val_grad,
                                +                                                             buildInputs a_indices,
                                +                                                             buildInputs b_indices,
                                +                                                             buildInputs sum_indices]
                                +        return (opDef "SparseAddGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "backprop_val_grad"
                                +  description: "1-D with shape `[nnz(sum)]`.  The gradient with respect to\nthe non-empty values of the sum."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "a_indices"
                                +  description: "2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "b_indices"
                                +  description: "2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sum_indices"
                                +  description: "2-D.  The `indices` of the sum `SparseTensor`, size\n`[nnz(sum), ndims]`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "a_val_grad"
                                +  description: "1-D with shape `[nnz(A)]`. The gradient with respect to the\nnon-empty values of A."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "b_val_grad"
                                +  description: "1-D with shape `[nnz(B)]`. The gradient with respect to the\nnon-empty values of B."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | var: Should be from a Variable().
                                +
                                +sparseApplyAdadelta :: forall v'4 v'5 v'6 v'7 v'8 t tindices
                                +                       m' . (MonadBuild m',
                                +                             OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t, OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +                       
                                +                       Tensor Ref t -- ^ __var__
                                +                       -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                       -> Tensor Ref t -- ^ __accum_update__: : Should be from a Variable().
                                +                       -> Tensor v'4 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                       -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar.
                                +                       -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
                                +                       -> Tensor v'7 t -- ^ __grad__: The gradient.
                                +                       -> Tensor v'8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                       -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyAdadelta = sparseApplyAdadelta' id
                                +sparseApplyAdadelta' :: forall v'4 v'5 v'6 v'7 v'8 t tindices
                                +                        m' . (MonadBuild m',
                                +                              OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float),
                                +                                      Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Int.Int64, Data.Int.Int8,
                                +                                      Data.Word.Word16, Data.Word.Word8, Double,
                                +                                      Float] t, OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] tindices) =>
                                +                        OpParams ->
                                +                        Tensor Ref t -- ^ __var__
                                +                        -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                        -> Tensor Ref t -- ^ __accum_update__: : Should be from a Variable().
                                +                        -> Tensor v'4 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                        -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar.
                                +                        -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
                                +                        -> Tensor v'7 t -- ^ __grad__: The gradient.
                                +                        -> Tensor v'8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                        -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyAdadelta' op'options var accum accum_update lr rho epsilon grad
                                +                     indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs accum_update,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "SparseApplyAdadelta"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "var" type_attr: "T" is_ref: true }
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum_update"
                                +  description: ": Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Constant factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
                                +--
                                +-- That is for rows we have grad for, we update var and accum as follows:
                                +-- accum += grad * grad
                                +-- var -= lr * grad * (1 / sqrt(accum))
                                +sparseApplyAdagrad :: forall v'3 v'4 v'5 t tindices m' . (MonadBuild m',
                                +                                                          OneOf '[(Data.Complex.Complex Double),
                                +                                                                  (Data.Complex.Complex Float),
                                +                                                                  Data.Int.Int16,
                                +                                                                  Data.Int.Int32,
                                +                                                                  Data.Int.Int64,
                                +                                                                  Data.Int.Int8,
                                +                                                                  Data.Word.Word16,
                                +                                                                  Data.Word.Word8,
                                +                                                                  Double,
                                +                                                                  Float] t,
                                +                                                          OneOf '[Data.Int.Int32,
                                +                                                                  Data.Int.Int64] tindices) =>
                                +                      
                                +                      Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                      -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                      -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                      -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                      -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                      -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyAdagrad = sparseApplyAdagrad' id
                                +sparseApplyAdagrad' :: forall v'3 v'4 v'5 t tindices m' . (MonadBuild m',
                                +                                                           OneOf '[(Data.Complex.Complex Double),
                                +                                                                   (Data.Complex.Complex Float),
                                +                                                                   Data.Int.Int16,
                                +                                                                   Data.Int.Int32,
                                +                                                                   Data.Int.Int64,
                                +                                                                   Data.Int.Int8,
                                +                                                                   Data.Word.Word16,
                                +                                                                   Data.Word.Word8,
                                +                                                                   Double,
                                +                                                                   Float] t,
                                +                                                           OneOf '[Data.Int.Int32,
                                +                                                                   Data.Int.Int64] tindices) =>
                                +                       OpParams ->
                                +                       Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                       -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                       -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                       -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                       -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                       -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyAdagrad' op'options var accum lr grad indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "SparseApplyAdagrad"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
                                +
                                +sparseApplyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices
                                +                        m' . (MonadBuild m',
                                +                              OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float),
                                +                                      Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Int.Int64, Data.Int.Int8,
                                +                                      Data.Word.Word16, Data.Word.Word8, Double,
                                +                                      Float] t, OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] tindices) =>
                                +                        
                                +                        Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                        -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable().
                                +                        -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable().
                                +                        -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                        -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                        -> Tensor v'6 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                        -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                        -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                        -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
                                +                        -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyAdagradDA = sparseApplyAdagradDA' id
                                +sparseApplyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices
                                +                         m' . (MonadBuild m',
                                +                               OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t, OneOf '[Data.Int.Int32,
                                +                                                                 Data.Int.Int64] tindices) =>
                                +                         OpParams ->
                                +                         Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                         -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable().
                                +                         -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable().
                                +                         -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                         -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                         -> Tensor v'6 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                         -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                         -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                         -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
                                +                         -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyAdagradDA' op'options var gradient_accumulator
                                +                      gradient_squared_accumulator grad indices lr l1 l2
                                +                      global_step | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs gradient_accumulator,
                                +                                                             buildInputs gradient_squared_accumulator,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs global_step]
                                +        buildOp [] (opDef "SparseApplyAdagradDA"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "gradient_accumulator"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "gradient_squared_accumulator"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "global_step"
                                +  description: "Training step number. Must be a scalar."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the centered RMSProp algorithm.
                                +--
                                +-- The centered RMSProp algorithm uses an estimate of the centered second moment
                                +-- (i.e., the variance) for normalization, as opposed to regular RMSProp, which
                                +-- uses the (uncentered) second moment. This often helps with training, but is
                                +-- slightly more expensive in terms of computation and memory.
                                +-- 
                                +-- Note that in dense implementation of this algorithm, mg, ms, and mom will
                                +-- update even if the grad is zero, but in this sparse implementation, mg, ms,
                                +-- and mom will not update in iterations during which the grad is zero.
                                +-- 
                                +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
                                +-- mean_grad = decay * mean_grad + (1-decay) * gradient
                                +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
                                +-- 
                                +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
                                +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
                                +-- var <- var - mom
                                +sparseApplyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices
                                +                              m' . (MonadBuild m',
                                +                                    OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t,
                                +                                    OneOf '[Data.Int.Int32,
                                +                                            Data.Int.Int64] tindices) => 
                                +                              Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                              -> Tensor Ref t -- ^ __mg__: Should be from a Variable().
                                +                              -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
                                +                              -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
                                +                              -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                              -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                              -> Tensor v'7 t -- ^ __momentum__
                                +                              -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                              -> Tensor v'9 t -- ^ __grad__: The gradient.
                                +                              -> Tensor v'10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
                                +                              -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyCenteredRMSProp = sparseApplyCenteredRMSProp' id
                                +sparseApplyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices
                                +                               m' . (MonadBuild m',
                                +                                     OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float),
                                +                                             Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t,
                                +                                     OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] tindices) =>
                                +                               OpParams ->
                                +                               Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                               -> Tensor Ref t -- ^ __mg__: Should be from a Variable().
                                +                               -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
                                +                               -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
                                +                               -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                               -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                               -> Tensor v'7 t -- ^ __momentum__
                                +                               -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                               -> Tensor v'9 t -- ^ __grad__: The gradient.
                                +                               -> Tensor v'10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
                                +                               -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon
                                +                            grad indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs mg,
                                +                                                             buildInputs ms,
                                +                                                             buildInputs mom,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs momentum,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "SparseApplyCenteredRMSProp"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "mg"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "ms"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "mom"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "momentum" type_attr: "T" }
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var, ms and mom."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update relevant entries in '*var' according to the Ftrl-proximal scheme.
                                +--
                                +-- That is for rows we have grad for, we update var, accum and linear as follows:
                                +-- accum_new = accum + grad * grad
                                +-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
                                +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
                                +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
                                +-- accum = accum_new
                                +sparseApplyFtrl :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices
                                +                   m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double),
                                +                                                (Data.Complex.Complex Float),
                                +                                                Data.Int.Int16, Data.Int.Int32,
                                +                                                Data.Int.Int64, Data.Int.Int8,
                                +                                                Data.Word.Word16,
                                +                                                Data.Word.Word8, Double,
                                +                                                Float] t,
                                +                         OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => 
                                +                   Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                   -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                   -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
                                +                   -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                   -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                   -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                   -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                   -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                   -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                   -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyFtrl = sparseApplyFtrl' id
                                +sparseApplyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices
                                +                    m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double),
                                +                                                 (Data.Complex.Complex Float),
                                +                                                 Data.Int.Int16, Data.Int.Int32,
                                +                                                 Data.Int.Int64, Data.Int.Int8,
                                +                                                 Data.Word.Word16,
                                +                                                 Data.Word.Word8, Double,
                                +                                                 Float] t,
                                +                          OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) =>
                                +                    OpParams ->
                                +                    Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                    -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                    -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
                                +                    -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                    -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                    -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                    -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                    -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                    -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                    -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyFtrl' op'options var accum linear grad indices lr l1 l2
                                +                 lr_power | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs linear,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs lr_power]
                                +        buildOp [] (opDef "SparseApplyFtrl"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "linear"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lr_power"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update relevant entries in '*var' according to the Ftrl-proximal scheme.
                                +--
                                +-- That is for rows we have grad for, we update var, accum and linear as follows:
                                +-- grad_with_shrinkage = grad + 2 * l2_shrinkage * var
                                +-- accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
                                +-- linear += grad_with_shrinkage +
                                +--     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
                                +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
                                +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
                                +-- accum = accum_new
                                +sparseApplyFtrlV2 :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices
                                +                     m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double),
                                +                                                  (Data.Complex.Complex Float),
                                +                                                  Data.Int.Int16,
                                +                                                  Data.Int.Int32,
                                +                                                  Data.Int.Int64, Data.Int.Int8,
                                +                                                  Data.Word.Word16,
                                +                                                  Data.Word.Word8, Double,
                                +                                                  Float] t,
                                +                           OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => 
                                +                     Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                     -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                     -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
                                +                     -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                     -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                     -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                     -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                     -> Tensor v'8 t -- ^ __l2__: L2 shrinkage regulariation. Must be a scalar.
                                +                     -> Tensor v'9 t -- ^ __l2_shrinkage__
                                +                     -> Tensor v'10 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                     -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyFtrlV2 = sparseApplyFtrlV2' id
                                +sparseApplyFtrlV2' :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices
                                +                      m' . (MonadBuild m',
                                +                            OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t, OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +                      OpParams ->
                                +                      Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                      -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                      -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
                                +                      -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                      -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                      -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                      -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                      -> Tensor v'8 t -- ^ __l2__: L2 shrinkage regulariation. Must be a scalar.
                                +                      -> Tensor v'9 t -- ^ __l2_shrinkage__
                                +                      -> Tensor v'10 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
                                +                      -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyFtrlV2' op'options var accum linear grad indices lr l1 l2
                                +                   l2_shrinkage lr_power | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs linear,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs l2_shrinkage,
                                +                                                             buildInputs lr_power]
                                +        buildOp [] (opDef "SparseApplyFtrlV2"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "linear"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 shrinkage regulariation. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "l2_shrinkage" type_attr: "T" }
                                +input_arg {
                                +  name: "lr_power"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Update relevant entries in '*var' and '*accum' according to the momentum scheme.
                                +--
                                +-- Set use_nesterov = True if you want to use Nesterov momentum.
                                +-- 
                                +-- That is for rows we have grad for, we update var and accum as follows:
                                +-- 
                                +-- accum = accum * momentum + grad
                                +-- var -= lr * accum
                                +sparseApplyMomentum :: forall v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m',
                                +                                                               OneOf '[(Data.Complex.Complex Double),
                                +                                                                       (Data.Complex.Complex Float),
                                +                                                                       Data.Int.Int16,
                                +                                                                       Data.Int.Int32,
                                +                                                                       Data.Int.Int64,
                                +                                                                       Data.Int.Int8,
                                +                                                                       Data.Word.Word16,
                                +                                                                       Data.Word.Word8,
                                +                                                                       Double,
                                +                                                                       Float] t,
                                +                                                               OneOf '[Data.Int.Int32,
                                +                                                                       Data.Int.Int64] tindices) =>
                                +                       
                                +                       Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                       -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                       -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                       -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                       -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                       -> Tensor v'6 t -- ^ __momentum__: Momentum. Must be a scalar.
                                +                       -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyMomentum = sparseApplyMomentum' id
                                +sparseApplyMomentum' :: forall v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m',
                                +                                                                OneOf '[(Data.Complex.Complex Double),
                                +                                                                        (Data.Complex.Complex Float),
                                +                                                                        Data.Int.Int16,
                                +                                                                        Data.Int.Int32,
                                +                                                                        Data.Int.Int64,
                                +                                                                        Data.Int.Int8,
                                +                                                                        Data.Word.Word16,
                                +                                                                        Data.Word.Word8,
                                +                                                                        Double,
                                +                                                                        Float] t,
                                +                                                                OneOf '[Data.Int.Int32,
                                +                                                                        Data.Int.Int64] tindices) =>
                                +                        OpParams ->
                                +                        Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                        -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                        -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                        -> Tensor v'4 t -- ^ __grad__: The gradient.
                                +                        -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                        -> Tensor v'6 t -- ^ __momentum__: Momentum. Must be a scalar.
                                +                        -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyMomentum' op'options var accum lr grad indices
                                +                     momentum | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs momentum]
                                +        buildOp [] (opDef "SparseApplyMomentum"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "momentum"
                                +  description: "Momentum. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +attr {
                                +  name: "use_nesterov"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
                                +}
                                +-}
                                +
                                +-- | Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
                                +--
                                +-- That is for rows we have grad for, we update var and accum as follows:
                                +-- accum += grad * grad
                                +-- prox_v = var
                                +-- prox_v -= lr * grad * (1 / sqrt(accum))
                                +-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
                                +sparseApplyProximalAdagrad :: forall v'3 v'4 v'5 v'6 v'7 t tindices
                                +                              m' . (MonadBuild m',
                                +                                    OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t,
                                +                                    OneOf '[Data.Int.Int32,
                                +                                            Data.Int.Int64] tindices) => 
                                +                              Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                              -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                              -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                              -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                              -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                              -> Tensor v'6 t -- ^ __grad__: The gradient.
                                +                              -> Tensor v'7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                              -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyProximalAdagrad = sparseApplyProximalAdagrad' id
                                +sparseApplyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 v'7 t tindices
                                +                               m' . (MonadBuild m',
                                +                                     OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float),
                                +                                             Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t,
                                +                                     OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] tindices) =>
                                +                               OpParams ->
                                +                               Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                               -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
                                +                               -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar.
                                +                               -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                               -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                               -> Tensor v'6 t -- ^ __grad__: The gradient.
                                +                               -> Tensor v'7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                               -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyProximalAdagrad' op'options var accum lr l1 l2 grad
                                +                            indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs accum,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "SparseApplyProximalAdagrad"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "accum"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Learning rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.
                                +--
                                +-- That is for rows we have grad for, we update var as follows:
                                +-- prox_v = var - alpha * grad
                                +-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
                                +sparseApplyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 v'6 t tindices
                                +                                      m' . (MonadBuild m',
                                +                                            OneOf '[(Data.Complex.Complex Double),
                                +                                                    (Data.Complex.Complex Float),
                                +                                                    Data.Int.Int16,
                                +                                                    Data.Int.Int32,
                                +                                                    Data.Int.Int64,
                                +                                                    Data.Int.Int8,
                                +                                                    Data.Word.Word16,
                                +                                                    Data.Word.Word8, Double,
                                +                                                    Float] t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] tindices) =>
                                +                                      
                                +                                      Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                                      -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                      -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                      -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                      -> Tensor v'5 t -- ^ __grad__: The gradient.
                                +                                      -> Tensor v'6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                      -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyProximalGradientDescent = sparseApplyProximalGradientDescent' id
                                +sparseApplyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 v'6 t tindices
                                +                                       m' . (MonadBuild m',
                                +                                             OneOf '[(Data.Complex.Complex Double),
                                +                                                     (Data.Complex.Complex Float),
                                +                                                     Data.Int.Int16,
                                +                                                     Data.Int.Int32,
                                +                                                     Data.Int.Int64,
                                +                                                     Data.Int.Int8,
                                +                                                     Data.Word.Word16,
                                +                                                     Data.Word.Word8, Double,
                                +                                                     Float] t,
                                +                                             OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] tindices) =>
                                +                                       OpParams ->
                                +                                       Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                                       -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
                                +                                       -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar.
                                +                                       -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar.
                                +                                       -> Tensor v'5 t -- ^ __grad__: The gradient.
                                +                                       -> Tensor v'6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
                                +                                       -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyProximalGradientDescent' op'options var alpha l1 l2 grad
                                +                                    indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs alpha,
                                +                                                             buildInputs l1,
                                +                                                             buildInputs l2,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "SparseApplyProximalGradientDescent"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "alpha"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l1"
                                +  description: "L1 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "l2"
                                +  description: "L2 regularization. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var and accum."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
                                +}
                                +-}
                                +
                                +-- | Update '*var' according to the RMSProp algorithm.
                                +--
                                +-- Note that in dense implementation of this algorithm, ms and mom will
                                +-- update even if the grad is zero, but in this sparse implementation, ms
                                +-- and mom will not update in iterations during which the grad is zero.
                                +-- 
                                +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
                                +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
                                +-- 
                                +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
                                +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
                                +-- var <- var - mom
                                +sparseApplyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices
                                +                      m' . (MonadBuild m',
                                +                            OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t, OneOf '[Data.Int.Int32,
                                +                                                      Data.Int.Int64] tindices) =>
                                +                      
                                +                      Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                      -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
                                +                      -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
                                +                      -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                      -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                      -> Tensor v'6 t -- ^ __momentum__
                                +                      -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                      -> Tensor v'8 t -- ^ __grad__: The gradient.
                                +                      -> Tensor v'9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
                                +                      -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyRMSProp = sparseApplyRMSProp' id
                                +sparseApplyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices
                                +                       m' . (MonadBuild m',
                                +                             OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t, OneOf '[Data.Int.Int32,
                                +                                                       Data.Int.Int64] tindices) =>
                                +                       OpParams ->
                                +                       Tensor Ref t -- ^ __var__: Should be from a Variable().
                                +                       -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
                                +                       -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
                                +                       -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar.
                                +                       -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar.
                                +                       -> Tensor v'6 t -- ^ __momentum__
                                +                       -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                                +                       -> Tensor v'8 t -- ^ __grad__: The gradient.
                                +                       -> Tensor v'9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
                                +                       -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
                                +sparseApplyRMSProp' op'options var ms mom lr rho momentum epsilon grad
                                +                    indices | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var,
                                +                                                             buildInputs ms,
                                +                                                             buildInputs mom,
                                +                                                             buildInputs lr,
                                +                                                             buildInputs rho,
                                +                                                             buildInputs momentum,
                                +                                                             buildInputs epsilon,
                                +                                                             buildInputs grad,
                                +                                                             buildInputs indices]
                                +        buildOp [] (opDef "SparseApplyRMSProp"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "var"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "ms"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "mom"
                                +  description: "Should be from a Variable()."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +input_arg {
                                +  name: "lr"
                                +  description: "Scaling factor. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "rho"
                                +  description: "Decay rate. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg { name: "momentum" type_attr: "T" }
                                +input_arg {
                                +  name: "epsilon"
                                +  description: "Ridge term. Must be a scalar."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "grad" description: "The gradient." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "A vector of indices into the first dimension of var, ms and mom."
                                +  type_attr: "Tindices"
                                +}
                                +output_arg {
                                +  name: "out"
                                +  description: "Same as \"var\"."
                                +  type_attr: "T"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "use_locking"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
                                +}
                                +-}
                                +
                                +-- | Concatenates a list of `SparseTensor` along the specified dimension.
                                +--
                                +-- Concatenation is with respect to the dense versions of these sparse tensors.
                                +-- It is assumed that each input is a `SparseTensor` whose elements are ordered
                                +-- along increasing dimension number.
                                +-- 
                                +-- All inputs' shapes must match, except for the concat dimension.  The
                                +-- `indices`, `values`, and `shapes` lists must have the same length.
                                +-- 
                                +-- The output shape is identical to the inputs', except along the concat
                                +-- dimension, where it is the sum of the inputs' sizes along that dimension.
                                +-- 
                                +-- The output elements will be resorted to preserve the sort order along
                                +-- increasing dimension number.
                                +-- 
                                +-- This op runs in `O(M log M)` time, where `M` is the total number of non-empty
                                +-- values across all inputs. This is due to the need for an internal sort in
                                +-- order to concatenate efficiently across an arbitrary dimension.
                                +-- 
                                +-- For example, if `concat_dim = 1` and the inputs are
                                +-- 
                                +--     sp_inputs[0]: shape = [2, 3]
                                +--     [0, 2]: "a"
                                +--     [1, 0]: "b"
                                +--     [1, 1]: "c"
                                +-- 
                                +--     sp_inputs[1]: shape = [2, 4]
                                +--     [0, 1]: "d"
                                +--     [0, 2]: "e"
                                +-- 
                                +-- then the output will be
                                +-- 
                                +--     shape = [2, 7]
                                +--     [0, 2]: "a"
                                +--     [0, 4]: "d"
                                +--     [0, 5]: "e"
                                +--     [1, 0]: "b"
                                +--     [1, 1]: "c"
                                +-- 
                                +-- Graphically this is equivalent to doing
                                +-- 
                                +--     [    a] concat [  d e  ] = [    a   d e  ]
                                +--     [b c  ]        [       ]   [b c          ]
                                +sparseConcat :: forall v'1 v'2 v'3 t . (TensorType t) => 
                                +                Data.Int.Int64 -- ^ __concat_dim__: Dimension to concatenate along. Must be in range [-rank, rank),
                                +                               -- where rank is the number of dimensions in each input `SparseTensor`.
                                +                -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__: 2-D.  Indices of each input `SparseTensor`.
                                +                -> [Tensor v'2 t] -- ^ __values__: 1-D.  Non-empty values of each `SparseTensor`.
                                +                -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__: 1-D.  Shapes of each `SparseTensor`.
                                +                -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                    Tensor Build Data.Int.Int64)
                                +                -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                --
                                +                -- * __output_indices__: 2-D.  Indices of the concatenated `SparseTensor`.
                                +                --
                                +                -- * __output_values__: 1-D.  Non-empty values of the concatenated `SparseTensor`.
                                +                --
                                +                -- * __output_shape__: 1-D.  Shape of the concatenated `SparseTensor`.
                                +sparseConcat = sparseConcat' id
                                +sparseConcat' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams ->
                                +                 Data.Int.Int64 -- ^ __concat_dim__: Dimension to concatenate along. Must be in range [-rank, rank),
                                +                                -- where rank is the number of dimensions in each input `SparseTensor`.
                                +                 -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__: 2-D.  Indices of each input `SparseTensor`.
                                +                 -> [Tensor v'2 t] -- ^ __values__: 1-D.  Non-empty values of each `SparseTensor`.
                                +                 -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__: 1-D.  Shapes of each `SparseTensor`.
                                +                 -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                     Tensor Build Data.Int.Int64)
                                +                 -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                 --
                                +                 -- * __output_indices__: 2-D.  Indices of the concatenated `SparseTensor`.
                                +                 --
                                +                 -- * __output_values__: 1-D.  Non-empty values of the concatenated `SparseTensor`.
                                +                 --
                                +                 -- * __output_shape__: 1-D.  Shape of the concatenated `SparseTensor`.
                                +sparseConcat' op'options concat_dim indices values
                                +              shapes | eqLengthGuard [("N", [("indices", length indices),
                                +                                             ("values", length values),
                                +                                             ("shapes", length shapes)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices,
                                +                                                             buildInputs values,
                                +                                                             buildInputs shapes]
                                +        return (opDef "SparseConcat"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "concat_dim" .~ concat_dim
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length indices) :: Int64
                                +{-
                                +input_arg {
                                +  name: "indices"
                                +  description: "2-D.  Indices of each input `SparseTensor`."
                                +  type: DT_INT64
                                +  number_attr: "N"
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "1-D.  Non-empty values of each `SparseTensor`."
                                +  type_attr: "T"
                                +  number_attr: "N"
                                +}
                                +input_arg {
                                +  name: "shapes"
                                +  description: "1-D.  Shapes of each `SparseTensor`."
                                +  type: DT_INT64
                                +  number_attr: "N"
                                +}
                                +output_arg {
                                +  name: "output_indices"
                                +  description: "2-D.  Indices of the concatenated `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_values"
                                +  description: "1-D.  Non-empty values of the concatenated `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_shape"
                                +  description: "1-D.  Shape of the concatenated `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "concat_dim"
                                +  type: "int"
                                +  description: "Dimension to concatenate along. Must be in range [-rank, rank),\nwhere rank is the number of dimensions in each input `SparseTensor`."
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 2 }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | A conditional accumulator for aggregating sparse gradients.
                                +--
                                +-- The accumulator accepts gradients marked with local_step greater or
                                +-- equal to the most recent global_step known to the accumulator. The
                                +-- average can be extracted from the accumulator, provided sufficient
                                +-- gradients have been accumulated. Extracting the average automatically
                                +-- resets the aggregate to 0, and increments the global_step recorded by
                                +-- the accumulator.
                                +sparseConditionalAccumulator :: forall m' . (MonadBuild m') => 
                                +                                DataType -- ^ __dtype__: The type of the value being accumulated.
                                +                                -> Shape -- ^ __shape__: The shape of the values.
                                +                                -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the accumulator.
                                +sparseConditionalAccumulator = sparseConditionalAccumulator' id
                                +sparseConditionalAccumulator' :: forall m' . (MonadBuild m') => OpParams ->
                                +                                 DataType -- ^ __dtype__: The type of the value being accumulated.
                                +                                 -> Shape -- ^ __shape__: The shape of the values.
                                +                                 -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the accumulator.
                                +sparseConditionalAccumulator' op'options dtype shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "SparseConditionalAccumulator"
                                +                    & opAttr "dtype" .~ dtype
                                +                    & opAttr "shape" .~ shape
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the accumulator."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the value being accumulated."
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "shape" type: "shape" description: "The shape of the values."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this accumulator will be shared under the given name\nacross multiple sessions."
                                +}
                                +-}
                                +
                                +-- | Generates sparse cross from a list of sparse and dense tensors.
                                +--
                                +-- The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
                                +-- representing features of one feature column. It outputs a 2D `SparseTensor` with
                                +-- the batchwise crosses of these features.
                                +-- 
                                +-- For example, if the inputs are
                                +-- 
                                +--     inputs[0]: SparseTensor with shape = [2, 2]
                                +--     [0, 0]: "a"
                                +--     [1, 0]: "b"
                                +--     [1, 1]: "c"
                                +-- 
                                +--     inputs[1]: SparseTensor with shape = [2, 1]
                                +--     [0, 0]: "d"
                                +--     [1, 0]: "e"
                                +-- 
                                +--     inputs[2]: Tensor [["f"], ["g"]]
                                +-- 
                                +-- then the output will be
                                +-- 
                                +--     shape = [2, 2]
                                +--     [0, 0]: "a_X_d_X_f"
                                +--     [1, 0]: "b_X_e_X_g"
                                +--     [1, 1]: "c_X_e_X_g"
                                +-- 
                                +-- if hashed_output=true then the output will be
                                +-- 
                                +--     shape = [2, 2]
                                +--     [0, 0]: FingerprintCat64(
                                +--                 Fingerprint64("f"), FingerprintCat64(
                                +--                     Fingerprint64("d"), Fingerprint64("a")))
                                +--     [1, 0]: FingerprintCat64(
                                +--                 Fingerprint64("g"), FingerprintCat64(
                                +--                     Fingerprint64("e"), Fingerprint64("b")))
                                +--     [1, 1]: FingerprintCat64(
                                +--                 Fingerprint64("g"), FingerprintCat64(
                                +--                     Fingerprint64("e"), Fingerprint64("c")))
                                +sparseCross :: forall v'1 v'2 v'3 v'4 sparse_types dense_types
                                +               out_type . (OneOfs '[Data.ByteString.ByteString,
                                +                                    Data.Int.Int64] sparse_types,
                                +                           OneOfs '[Data.ByteString.ByteString,
                                +                                    Data.Int.Int64] dense_types,
                                +                           OneOf '[Data.ByteString.ByteString,
                                +                                   Data.Int.Int64] out_type) => 
                                +               Data.Int.Int64 -- ^ __hash_key__: Specify the hash_key that will be used by the `FingerprintCat64`
                                +                              -- function to combine the crosses fingerprints.
                                +               -> Bool -- ^ __hashed_output__: If true, returns the hash of the cross instead of the string.
                                +                       -- This will allow us avoiding string manipulations.
                                +               -> DataType -- ^ __internal_type__
                                +               -> Data.Int.Int64 -- ^ __num_buckets__: It is used if hashed_output is true.
                                +                                 -- output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
                                +               -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__: 2-D.  Indices of each input `SparseTensor`.
                                +               -> TensorList (v'2) sparse_types -- ^ __values__: 1-D.   values of each `SparseTensor`.
                                +               -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__: 1-D.   Shapes of each `SparseTensor`.
                                +               -> TensorList (v'4) dense_types -- ^ __dense_inputs__: 2-D.    Columns represented by dense `Tensor`.
                                +               -> (Tensor Build Data.Int.Int64, Tensor Build out_type,
                                +                   Tensor Build Data.Int.Int64)
                                +               -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +               --
                                +               -- * __output_indices__: 2-D.  Indices of the concatenated `SparseTensor`.
                                +               --
                                +               -- * __output_values__: 1-D.  Non-empty values of the concatenated or hashed
                                +               -- `SparseTensor`.
                                +               --
                                +               -- * __output_shape__: 1-D.  Shape of the concatenated `SparseTensor`.
                                +sparseCross = sparseCross' id
                                +sparseCross' :: forall v'1 v'2 v'3 v'4 sparse_types dense_types
                                +                out_type . (OneOfs '[Data.ByteString.ByteString,
                                +                                     Data.Int.Int64] sparse_types,
                                +                            OneOfs '[Data.ByteString.ByteString,
                                +                                     Data.Int.Int64] dense_types,
                                +                            OneOf '[Data.ByteString.ByteString,
                                +                                    Data.Int.Int64] out_type) => OpParams ->
                                +                Data.Int.Int64 -- ^ __hash_key__: Specify the hash_key that will be used by the `FingerprintCat64`
                                +                               -- function to combine the crosses fingerprints.
                                +                -> Bool -- ^ __hashed_output__: If true, returns the hash of the cross instead of the string.
                                +                        -- This will allow us avoiding string manipulations.
                                +                -> DataType -- ^ __internal_type__
                                +                -> Data.Int.Int64 -- ^ __num_buckets__: It is used if hashed_output is true.
                                +                                  -- output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
                                +                -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__: 2-D.  Indices of each input `SparseTensor`.
                                +                -> TensorList (v'2) sparse_types -- ^ __values__: 1-D.   values of each `SparseTensor`.
                                +                -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__: 1-D.   Shapes of each `SparseTensor`.
                                +                -> TensorList (v'4) dense_types -- ^ __dense_inputs__: 2-D.    Columns represented by dense `Tensor`.
                                +                -> (Tensor Build Data.Int.Int64, Tensor Build out_type,
                                +                    Tensor Build Data.Int.Int64)
                                +                -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                --
                                +                -- * __output_indices__: 2-D.  Indices of the concatenated `SparseTensor`.
                                +                --
                                +                -- * __output_values__: 1-D.  Non-empty values of the concatenated or hashed
                                +                -- `SparseTensor`.
                                +                --
                                +                -- * __output_shape__: 1-D.  Shape of the concatenated `SparseTensor`.
                                +sparseCross' op'options hash_key hashed_output internal_type num_buckets indices
                                +             values shapes
                                +             dense_inputs | eqLengthGuard [("N", [("indices", length indices),
                                +                                                  ("shapes", length shapes)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices,
                                +                                                             buildInputs values,
                                +                                                             buildInputs shapes,
                                +                                                             buildInputs dense_inputs]
                                +        return (opDef "SparseCross"
                                +                & opAttr "sparse_types" .~ fromTensorTypes (Proxy :: Proxy sparse_types)
                                +                & opAttr "dense_types" .~ fromTensorTypes (Proxy :: Proxy dense_types)
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & opAttr "hash_key" .~ hash_key
                                +                & opAttr "hashed_output" .~ hashed_output
                                +                & opAttr "internal_type" .~ internal_type
                                +                & opAttr "num_buckets" .~ num_buckets
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length indices) :: Int64
                                +{-
                                +input_arg {
                                +  name: "indices"
                                +  description: "2-D.  Indices of each input `SparseTensor`."
                                +  type: DT_INT64
                                +  number_attr: "N"
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "1-D.   values of each `SparseTensor`."
                                +  type_list_attr: "sparse_types"
                                +}
                                +input_arg {
                                +  name: "shapes"
                                +  description: "1-D.   Shapes of each `SparseTensor`."
                                +  type: DT_INT64
                                +  number_attr: "N"
                                +}
                                +input_arg {
                                +  name: "dense_inputs"
                                +  description: "2-D.    Columns represented by dense `Tensor`."
                                +  type_list_attr: "dense_types"
                                +}
                                +output_arg {
                                +  name: "output_indices"
                                +  description: "2-D.  Indices of the concatenated `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_values"
                                +  description: "1-D.  Non-empty values of the concatenated or hashed\n`SparseTensor`."
                                +  type_attr: "out_type"
                                +}
                                +output_arg {
                                +  name: "output_shape"
                                +  description: "1-D.  Shape of the concatenated `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +attr { name: "N" type: "int" has_minimum: true }
                                +attr {
                                +  name: "hashed_output"
                                +  type: "bool"
                                +  description: "If true, returns the hash of the cross instead of the string.\nThis will allow us avoiding string manipulations."
                                +}
                                +attr {
                                +  name: "num_buckets"
                                +  type: "int"
                                +  description: "It is used if hashed_output is true.\noutput = hashed_value%num_buckets if num_buckets > 0 else hashed_value."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "hash_key"
                                +  type: "int"
                                +  description: "Specify the hash_key that will be used by the `FingerprintCat64`\nfunction to combine the crosses fingerprints."
                                +}
                                +attr {
                                +  name: "sparse_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  allowed_values { list { type: DT_INT64 type: DT_STRING } }
                                +}
                                +attr {
                                +  name: "dense_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  allowed_values { list { type: DT_INT64 type: DT_STRING } }
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT64 type: DT_STRING } }
                                +}
                                +attr {
                                +  name: "internal_type"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT64 type: DT_STRING } }
                                +}
                                +-}
                                +
                                +-- | Adds up a SparseTensor and a dense Tensor, using these special rules:
                                +--
                                +-- (1) Broadcasts the dense side to have the same shape as the sparse side, if
                                +--     eligible;
                                +-- (2) Then, only the dense values pointed to by the indices of the SparseTensor
                                +--     participate in the cwise addition.
                                +-- 
                                +-- By these rules, the result is a logical SparseTensor with exactly the same
                                +-- indices and shape, but possibly with different non-zero values.  The output of
                                +-- this Op is the resultant non-zero values.
                                +sparseDenseCwiseAdd :: forall v'1 v'2 v'3 v'4
                                +                       t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => 
                                +                       Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                 -- SparseTensor, possibly not in canonical ordering.
                                +                       -> Tensor v'2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
                                +                       -> Tensor v'4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
                                +                       -> Tensor Build t -- ^ __output__: 1-D.  The `N` values that are operated on.
                                +sparseDenseCwiseAdd = sparseDenseCwiseAdd' id
                                +sparseDenseCwiseAdd' :: forall v'1 v'2 v'3 v'4
                                +                        t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => OpParams ->
                                +                        Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                  -- SparseTensor, possibly not in canonical ordering.
                                +                        -> Tensor v'2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
                                +                        -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
                                +                        -> Tensor v'4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
                                +                        -> Tensor Build t -- ^ __output__: 1-D.  The `N` values that are operated on.
                                +sparseDenseCwiseAdd' op'options sp_indices sp_values sp_shape
                                +                     dense | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices,
                                +                                                             buildInputs sp_values,
                                +                                                             buildInputs sp_shape,
                                +                                                             buildInputs dense]
                                +        return (opDef "SparseDenseCwiseAdd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sp_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sp_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "sp_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "dense"
                                +  description: "`R`-D.  The dense Tensor operand."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "1-D.  The `N` values that are operated on."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Component-wise divides a SparseTensor by a dense Tensor.
                                +--
                                +-- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
                                +-- the other direction.
                                +sparseDenseCwiseDiv :: forall v'1 v'2 v'3 v'4
                                +                       t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => 
                                +                       Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                 -- SparseTensor, possibly not in canonical ordering.
                                +                       -> Tensor v'2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
                                +                       -> Tensor v'4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
                                +                       -> Tensor Build t -- ^ __output__: 1-D.  The `N` values that are operated on.
                                +sparseDenseCwiseDiv = sparseDenseCwiseDiv' id
                                +sparseDenseCwiseDiv' :: forall v'1 v'2 v'3 v'4
                                +                        t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => OpParams ->
                                +                        Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                  -- SparseTensor, possibly not in canonical ordering.
                                +                        -> Tensor v'2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
                                +                        -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
                                +                        -> Tensor v'4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
                                +                        -> Tensor Build t -- ^ __output__: 1-D.  The `N` values that are operated on.
                                +sparseDenseCwiseDiv' op'options sp_indices sp_values sp_shape
                                +                     dense | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices,
                                +                                                             buildInputs sp_values,
                                +                                                             buildInputs sp_shape,
                                +                                                             buildInputs dense]
                                +        return (opDef "SparseDenseCwiseDiv"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sp_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sp_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "sp_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "dense"
                                +  description: "`R`-D.  The dense Tensor operand."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "1-D.  The `N` values that are operated on."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Component-wise multiplies a SparseTensor by a dense Tensor.
                                +--
                                +-- The output locations corresponding to the implicitly zero elements in the sparse
                                +-- tensor will be zero (i.e., will not take up storage space), regardless of the
                                +-- contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).
                                +-- 
                                +-- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
                                +-- the other direction.
                                +sparseDenseCwiseMul :: forall v'1 v'2 v'3 v'4
                                +                       t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => 
                                +                       Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                 -- SparseTensor, possibly not in canonical ordering.
                                +                       -> Tensor v'2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
                                +                       -> Tensor v'4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
                                +                       -> Tensor Build t -- ^ __output__: 1-D.  The `N` values that are operated on.
                                +sparseDenseCwiseMul = sparseDenseCwiseMul' id
                                +sparseDenseCwiseMul' :: forall v'1 v'2 v'3 v'4
                                +                        t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => OpParams ->
                                +                        Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                  -- SparseTensor, possibly not in canonical ordering.
                                +                        -> Tensor v'2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
                                +                        -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
                                +                        -> Tensor v'4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
                                +                        -> Tensor Build t -- ^ __output__: 1-D.  The `N` values that are operated on.
                                +sparseDenseCwiseMul' op'options sp_indices sp_values sp_shape
                                +                     dense | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices,
                                +                                                             buildInputs sp_values,
                                +                                                             buildInputs sp_shape,
                                +                                                             buildInputs dense]
                                +        return (opDef "SparseDenseCwiseMul"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sp_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sp_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "sp_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "dense"
                                +  description: "`R`-D.  The dense Tensor operand."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "1-D.  The `N` values that are operated on."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Fills empty rows in the input 2-D `SparseTensor` with a default value.
                                +--
                                +-- The input `SparseTensor` is represented via the tuple of inputs
                                +-- (`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
                                +-- same `dense_shape` but with indices `output_indices` and values
                                +-- `output_values`.
                                +-- 
                                +-- This op inserts a single entry for every row that doesn't have any values.
                                +-- The index is created as `[row, 0, ..., 0]` and the inserted value
                                +-- is `default_value`.
                                +-- 
                                +-- For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
                                +-- 
                                +--     [0, 1]: a
                                +--     [0, 3]: b
                                +--     [2, 0]: c
                                +--     [3, 1]: d
                                +-- 
                                +-- Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
                                +-- 
                                +--     [0, 1]: a
                                +--     [0, 3]: b
                                +--     [1, 0]: default_value
                                +--     [2, 0]: c
                                +--     [3, 1]: d
                                +--     [4, 0]: default_value
                                +-- 
                                +-- The output `SparseTensor` will be in row-major order and will have the
                                +-- same shape as the input.
                                +-- 
                                +-- This op also returns an indicator vector shaped `[dense_shape[0]]` such that
                                +-- 
                                +--     empty_row_indicator[i] = True iff row i was an empty row.
                                +-- 
                                +-- And a reverse index map vector shaped `[indices.shape[0]]` that is used during
                                +-- backpropagation,
                                +-- 
                                +--     reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
                                +sparseFillEmptyRows :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => 
                                +                       Tensor v'1 Data.Int.Int64 -- ^ __indices__: 2-D. the indices of the sparse tensor.
                                +                       -> Tensor v'2 t -- ^ __values__: 1-D. the values of the sparse tensor.
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __dense_shape__: 1-D. the shape of the sparse tensor.
                                +                       -> Tensor v'4 t -- ^ __default_value__: 0-D. default value to insert into location `[row, 0, ..., 0]`
                                +                                       --   for rows missing from the input sparse tensor.
                                +                                       -- output indices: 2-D. the indices of the filled sparse tensor.
                                +                       -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                           Tensor Build Bool, Tensor Build Data.Int.Int64)
                                +                       -- ^ (__output_indices__, __output_values__, __empty_row_indicator__, __reverse_index_map__)
                                +                       --
                                +                       -- * __output_indices__
                                +                       --
                                +                       -- * __output_values__: 1-D. the values of the filled sparse tensor.
                                +                       --
                                +                       -- * __empty_row_indicator__: 1-D. whether the dense row was missing in the
                                +                       -- input sparse tensor.
                                +                       --
                                +                       -- * __reverse_index_map__: 1-D. a map from the input indices to the output indices.
                                +sparseFillEmptyRows = sparseFillEmptyRows' id
                                +sparseFillEmptyRows' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams ->
                                +                        Tensor v'1 Data.Int.Int64 -- ^ __indices__: 2-D. the indices of the sparse tensor.
                                +                        -> Tensor v'2 t -- ^ __values__: 1-D. the values of the sparse tensor.
                                +                        -> Tensor v'3 Data.Int.Int64 -- ^ __dense_shape__: 1-D. the shape of the sparse tensor.
                                +                        -> Tensor v'4 t -- ^ __default_value__: 0-D. default value to insert into location `[row, 0, ..., 0]`
                                +                                        --   for rows missing from the input sparse tensor.
                                +                                        -- output indices: 2-D. the indices of the filled sparse tensor.
                                +                        -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                            Tensor Build Bool, Tensor Build Data.Int.Int64)
                                +                        -- ^ (__output_indices__, __output_values__, __empty_row_indicator__, __reverse_index_map__)
                                +                        --
                                +                        -- * __output_indices__
                                +                        --
                                +                        -- * __output_values__: 1-D. the values of the filled sparse tensor.
                                +                        --
                                +                        -- * __empty_row_indicator__: 1-D. whether the dense row was missing in the
                                +                        -- input sparse tensor.
                                +                        --
                                +                        -- * __reverse_index_map__: 1-D. a map from the input indices to the output indices.
                                +sparseFillEmptyRows' op'options indices values dense_shape
                                +                     default_value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices,
                                +                                                             buildInputs values,
                                +                                                             buildInputs dense_shape,
                                +                                                             buildInputs default_value]
                                +        return (opDef "SparseFillEmptyRows"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "indices"
                                +  description: "2-D. the indices of the sparse tensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "1-D. the values of the sparse tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "dense_shape"
                                +  description: "1-D. the shape of the sparse tensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "default_value"
                                +  description: "0-D. default value to insert into location `[row, 0, ..., 0]`\n  for rows missing from the input sparse tensor.\noutput indices: 2-D. the indices of the filled sparse tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output_indices" type: DT_INT64 }
                                +output_arg {
                                +  name: "output_values"
                                +  description: "1-D. the values of the filled sparse tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "empty_row_indicator"
                                +  description: "1-D. whether the dense row was missing in the\ninput sparse tensor."
                                +  type: DT_BOOL
                                +}
                                +output_arg {
                                +  name: "reverse_index_map"
                                +  description: "1-D. a map from the input indices to the output indices."
                                +  type: DT_INT64
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | The gradient of SparseFillEmptyRows.
                                +--
                                +-- Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
                                +-- shaped `[N_full]`, where `N_full >= N` and copies data into either
                                +-- `d_values` or `d_default_value`.  Here `d_values` is shaped `[N]` and
                                +-- `d_default_value` is a scalar.
                                +-- 
                                +--   d_values[j] = grad_values[reverse_index_map[j]]
                                +--   d_default_value = sum_{k : 0 .. N_full - 1} (
                                +--      grad_values[k] * 1{k not in reverse_index_map})
                                +sparseFillEmptyRowsGrad :: forall v'1 v'2 t . (TensorType t) => 
                                +                           Tensor v'1 Data.Int.Int64 -- ^ __reverse_index_map__: 1-D.  The reverse index map from SparseFillEmptyRows.
                                +                           -> Tensor v'2 t -- ^ __grad_values__: 1-D.  The gradients from backprop.
                                +                           -> (Tensor Build t, Tensor Build t)
                                +                           -- ^ (__d_values__, __d_default_value__)
                                +                           --
                                +                           -- * __d_values__: 1-D.  The backprop into values.
                                +                           --
                                +                           -- * __d_default_value__: 0-D.  The backprop into default_value.
                                +sparseFillEmptyRowsGrad = sparseFillEmptyRowsGrad' id
                                +sparseFillEmptyRowsGrad' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +                            Tensor v'1 Data.Int.Int64 -- ^ __reverse_index_map__: 1-D.  The reverse index map from SparseFillEmptyRows.
                                +                            -> Tensor v'2 t -- ^ __grad_values__: 1-D.  The gradients from backprop.
                                +                            -> (Tensor Build t, Tensor Build t)
                                +                            -- ^ (__d_values__, __d_default_value__)
                                +                            --
                                +                            -- * __d_values__: 1-D.  The backprop into values.
                                +                            --
                                +                            -- * __d_default_value__: 0-D.  The backprop into default_value.
                                +sparseFillEmptyRowsGrad' op'options reverse_index_map
                                +                         grad_values | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reverse_index_map,
                                +                                                             buildInputs grad_values]
                                +        return (opDef "SparseFillEmptyRowsGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "reverse_index_map"
                                +  description: "1-D.  The reverse index map from SparseFillEmptyRows."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "grad_values"
                                +  description: "1-D.  The gradients from backprop."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "d_values"
                                +  description: "1-D.  The backprop into values."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "d_default_value"
                                +  description: "0-D.  The backprop into default_value."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Multiply matrix "a" by matrix "b".
                                +--
                                +-- The inputs must be two-dimensional matrices and the inner dimension of "a" must
                                +-- match the outer dimension of "b". This op is optimized for the case where at
                                +-- least one of "a" or "b" is sparse. The breakeven for using this versus a dense
                                +-- matrix multiply on one platform was 30% zero values in the sparse matrix.
                                +-- 
                                +-- The gradient computation of this operation will only take advantage of sparsity
                                +-- in the input gradient when that gradient comes from a Relu.
                                +sparseMatMul :: forall v'1 v'2 ta tb . (OneOf '[Data.Word.Word16, Float] ta,
                                +                                        OneOf '[Data.Word.Word16, Float] tb) => 
                                +                Tensor v'1 ta -- ^ __a__
                                +                -> Tensor v'2 tb -- ^ __b__
                                +                -> Tensor Build Float -- ^ __product__
                                +sparseMatMul = sparseMatMul' id
                                +sparseMatMul' :: forall v'1 v'2 ta tb . (OneOf '[Data.Word.Word16, Float] ta,
                                +                                         OneOf '[Data.Word.Word16, Float] tb) =>
                                +                 OpParams ->
                                +                 Tensor v'1 ta -- ^ __a__
                                +                 -> Tensor v'2 tb -- ^ __b__
                                +                 -> Tensor Build Float -- ^ __product__
                                +sparseMatMul' op'options a b | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a,
                                +                                                             buildInputs b]
                                +        return (opDef "SparseMatMul"
                                +                & opAttr "Ta" .~ tensorType (undefined :: ta)
                                +                & opAttr "Tb" .~ tensorType (undefined :: tb)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "a" type_attr: "Ta" }
                                +input_arg { name: "b" type_attr: "Tb" }
                                +output_arg { name: "product" type: DT_FLOAT }
                                +attr {
                                +  name: "transpose_a" type: "bool" default_value { b: false }
                                +}
                                +attr {
                                +  name: "transpose_b" type: "bool" default_value { b: false }
                                +}
                                +attr {
                                +  name: "a_is_sparse" type: "bool" default_value { b: false }
                                +}
                                +attr {
                                +  name: "b_is_sparse" type: "bool" default_value { b: false }
                                +}
                                +attr {
                                +  name: "Ta"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } }
                                +}
                                +attr {
                                +  name: "Tb"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } }
                                +}
                                +-}
                                +
                                +-- | Computes the max of elements across dimensions of a SparseTensor.
                                +--
                                +-- This Op takes a SparseTensor and is the sparse counterpart to
                                +-- `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
                                +-- instead of a sparse one.
                                +-- 
                                +-- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
                                +-- with length 1.
                                +-- 
                                +-- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
                                +-- with a single element is returned.  Additionally, the axes can be negative,
                                +-- which are interpreted according to the indexing rules in Python.
                                +sparseReduceMax :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                       Data.Int.Int32,
                                +                                                       Data.Int.Int64,
                                +                                                       Data.Int.Int8,
                                +                                                       Data.Word.Word16,
                                +                                                       Data.Word.Word8, Double,
                                +                                                       Float] t) => 
                                +                   Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                             -- SparseTensor, possibly not in canonical ordering.
                                +                   -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                   -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                   -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
                                +                   -> Tensor Build t -- ^ __output__: `R-K`-D.  The reduced Tensor.
                                +sparseReduceMax = sparseReduceMax' id
                                +sparseReduceMax' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                        Data.Int.Int32,
                                +                                                        Data.Int.Int64,
                                +                                                        Data.Int.Int8,
                                +                                                        Data.Word.Word16,
                                +                                                        Data.Word.Word8, Double,
                                +                                                        Float] t) => OpParams ->
                                +                    Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                              -- SparseTensor, possibly not in canonical ordering.
                                +                    -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                    -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                    -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
                                +                    -> Tensor Build t -- ^ __output__: `R-K`-D.  The reduced Tensor.
                                +sparseReduceMax' op'options input_indices input_values input_shape
                                +                 reduction_axes | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices,
                                +                                                             buildInputs input_values,
                                +                                                             buildInputs input_shape,
                                +                                                             buildInputs reduction_axes]
                                +        return (opDef "SparseReduceMax"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "input_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "input_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "reduction_axes"
                                +  description: "1-D.  Length-`K` vector containing the reduction axes."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "`R-K`-D.  The reduced Tensor."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the max of elements across dimensions of a SparseTensor.
                                +--
                                +-- This Op takes a SparseTensor and is the sparse counterpart to
                                +-- `tf.reduce_max()`.  In contrast to SparseReduceMax, this Op returns a
                                +-- SparseTensor.
                                +-- 
                                +-- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
                                +-- with length 1.
                                +-- 
                                +-- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
                                +-- with a single element is returned.  Additionally, the axes can be negative,
                                +-- which are interpreted according to the indexing rules in Python.
                                +sparseReduceMaxSparse :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                             Data.Int.Int32,
                                +                                                             Data.Int.Int64,
                                +                                                             Data.Int.Int8,
                                +                                                             Data.Word.Word16,
                                +                                                             Data.Word.Word8,
                                +                                                             Double,
                                +                                                             Float] t) => 
                                +                         Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                   -- SparseTensor, possibly not in canonical ordering.
                                +                         -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                         -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                         -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
                                +                         -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                             Tensor Build Data.Int.Int64)
                                +                         -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                         --
                                +                         -- * __output_indices__
                                +                         --
                                +                         -- * __output_values__
                                +                         --
                                +                         -- * __output_shape__
                                +sparseReduceMaxSparse = sparseReduceMaxSparse' id
                                +sparseReduceMaxSparse' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                +                                                              Data.Int.Int32,
                                +                                                              Data.Int.Int64,
                                +                                                              Data.Int.Int8,
                                +                                                              Data.Word.Word16,
                                +                                                              Data.Word.Word8,
                                +                                                              Double,
                                +                                                              Float] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                    -- SparseTensor, possibly not in canonical ordering.
                                +                          -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                          -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                          -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
                                +                          -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                              Tensor Build Data.Int.Int64)
                                +                          -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                          --
                                +                          -- * __output_indices__
                                +                          --
                                +                          -- * __output_values__
                                +                          --
                                +                          -- * __output_shape__
                                +sparseReduceMaxSparse' op'options input_indices input_values input_shape
                                +                       reduction_axes | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices,
                                +                                                             buildInputs input_values,
                                +                                                             buildInputs input_shape,
                                +                                                             buildInputs reduction_axes]
                                +        return (opDef "SparseReduceMaxSparse"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "input_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "input_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "reduction_axes"
                                +  description: "1-D.  Length-`K` vector containing the reduction axes."
                                +  type: DT_INT32
                                +}
                                +output_arg { name: "output_indices" type: DT_INT64 }
                                +output_arg { name: "output_values" type_attr: "T" }
                                +output_arg { name: "output_shape" type: DT_INT64 }
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the sum of elements across dimensions of a SparseTensor.
                                +--
                                +-- This Op takes a SparseTensor and is the sparse counterpart to
                                +-- `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
                                +-- instead of a sparse one.
                                +-- 
                                +-- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
                                +-- with length 1.
                                +-- 
                                +-- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
                                +-- with a single element is returned.  Additionally, the axes can be negative,
                                +-- which are interpreted according to the indexing rules in Python.
                                +sparseReduceSum :: forall v'1 v'2 v'3 v'4
                                +                   t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                Data.Word.Word16, Data.Word.Word8, Double,
                                +                                Float] t) => 
                                +                   Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                             -- SparseTensor, possibly not in canonical ordering.
                                +                   -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                   -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                   -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
                                +                   -> Tensor Build t -- ^ __output__: `R-K`-D.  The reduced Tensor.
                                +sparseReduceSum = sparseReduceSum' id
                                +sparseReduceSum' :: forall v'1 v'2 v'3 v'4
                                +                    t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Int.Int16,
                                +                                 Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                +                                 Data.Word.Word16, Data.Word.Word8, Double,
                                +                                 Float] t) => OpParams ->
                                +                    Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                              -- SparseTensor, possibly not in canonical ordering.
                                +                    -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                    -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                    -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
                                +                    -> Tensor Build t -- ^ __output__: `R-K`-D.  The reduced Tensor.
                                +sparseReduceSum' op'options input_indices input_values input_shape
                                +                 reduction_axes | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices,
                                +                                                             buildInputs input_values,
                                +                                                             buildInputs input_shape,
                                +                                                             buildInputs reduction_axes]
                                +        return (opDef "SparseReduceSum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "input_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "input_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "reduction_axes"
                                +  description: "1-D.  Length-`K` vector containing the reduction axes."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "`R-K`-D.  The reduced Tensor."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the sum of elements across dimensions of a SparseTensor.
                                +--
                                +-- This Op takes a SparseTensor and is the sparse counterpart to
                                +-- `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
                                +-- SparseTensor.
                                +-- 
                                +-- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
                                +-- with length 1.
                                +-- 
                                +-- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
                                +-- with a single element is returned.  Additionally, the axes can be negative,
                                +-- which are interpreted according to the indexing rules in Python.
                                +sparseReduceSumSparse :: forall v'1 v'2 v'3 v'4
                                +                         t . (OneOf '[(Data.Complex.Complex Double),
                                +                                      (Data.Complex.Complex Float),
                                +                                      Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Int.Int64, Data.Int.Int8,
                                +                                      Data.Word.Word16, Data.Word.Word8, Double,
                                +                                      Float] t) => 
                                +                         Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                   -- SparseTensor, possibly not in canonical ordering.
                                +                         -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                         -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                         -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
                                +                         -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                             Tensor Build Data.Int.Int64)
                                +                         -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                         --
                                +                         -- * __output_indices__
                                +                         --
                                +                         -- * __output_values__
                                +                         --
                                +                         -- * __output_shape__
                                +sparseReduceSumSparse = sparseReduceSumSparse' id
                                +sparseReduceSumSparse' :: forall v'1 v'2 v'3 v'4
                                +                          t . (OneOf '[(Data.Complex.Complex Double),
                                +                                       (Data.Complex.Complex Float),
                                +                                       Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => OpParams ->
                                +                          Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                    -- SparseTensor, possibly not in canonical ordering.
                                +                          -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                          -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                          -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
                                +                          -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                              Tensor Build Data.Int.Int64)
                                +                          -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                          --
                                +                          -- * __output_indices__
                                +                          --
                                +                          -- * __output_values__
                                +                          --
                                +                          -- * __output_shape__
                                +sparseReduceSumSparse' op'options input_indices input_values input_shape
                                +                       reduction_axes | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices,
                                +                                                             buildInputs input_values,
                                +                                                             buildInputs input_shape,
                                +                                                             buildInputs reduction_axes]
                                +        return (opDef "SparseReduceSumSparse"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "input_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "input_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "reduction_axes"
                                +  description: "1-D.  Length-`K` vector containing the reduction axes."
                                +  type: DT_INT32
                                +}
                                +output_arg { name: "output_indices" type: DT_INT64 }
                                +output_arg { name: "output_values" type_attr: "T" }
                                +output_arg { name: "output_shape" type: DT_INT64 }
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Reorders a SparseTensor into the canonical, row-major ordering.
                                +--
                                +-- Note that by convention, all sparse ops preserve the canonical ordering along
                                +-- increasing dimension number. The only time ordering can be violated is during
                                +-- manual manipulation of the indices and values vectors to add entries.
                                +-- 
                                +-- Reordering does not affect the shape of the SparseTensor.
                                +-- 
                                +-- If the tensor has rank `R` and `N` non-empty values, `input_indices` has
                                +-- shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.
                                +sparseReorder :: forall v'1 v'2 v'3 t . (TensorType t) => 
                                +                 Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                           -- SparseTensor, possibly not in canonical ordering.
                                +                 -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                 -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                 -> (Tensor Build Data.Int.Int64, Tensor Build t)
                                +                 -- ^ (__output_indices__, __output_values__)
                                +                 --
                                +                 -- * __output_indices__: 2-D.  `N x R` matrix with the same indices as input_indices, but
                                +                 -- in canonical row-major ordering.
                                +                 --
                                +                 -- * __output_values__: 1-D.  `N` non-empty values corresponding to `output_indices`.
                                +sparseReorder = sparseReorder' id
                                +sparseReorder' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams ->
                                +                  Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                            -- SparseTensor, possibly not in canonical ordering.
                                +                  -> Tensor v'2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
                                +                  -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
                                +                  -> (Tensor Build Data.Int.Int64, Tensor Build t)
                                +                  -- ^ (__output_indices__, __output_values__)
                                +                  --
                                +                  -- * __output_indices__: 2-D.  `N x R` matrix with the same indices as input_indices, but
                                +                  -- in canonical row-major ordering.
                                +                  --
                                +                  -- * __output_values__: 1-D.  `N` non-empty values corresponding to `output_indices`.
                                +sparseReorder' op'options input_indices input_values
                                +               input_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices,
                                +                                                             buildInputs input_values,
                                +                                                             buildInputs input_shape]
                                +        return (opDef "SparseReorder"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "input_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "input_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_indices"
                                +  description: "2-D.  `N x R` matrix with the same indices as input_indices, but\nin canonical row-major ordering."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `output_indices`."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Reshapes a SparseTensor to represent values in a new dense shape.
                                +--
                                +-- This operation has the same semantics as reshape on the represented dense
                                +-- tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
                                +-- 
                                +-- If one component of `new_shape` is the special value -1, the size of that
                                +-- dimension is computed so that the total dense size remains constant.  At
                                +-- most one component of `new_shape` can be -1.  The number of dense elements
                                +-- implied by `new_shape` must be the same as the number of dense elements
                                +-- originally implied by `input_shape`.
                                +-- 
                                +-- Reshaping does not affect the order of values in the SparseTensor.
                                +-- 
                                +-- If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
                                +-- has length `R_out`, then `input_indices` has shape `[N, R_in]`,
                                +-- `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
                                +-- `output_shape` has length `R_out`.
                                +sparseReshape :: 
                                +                 Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
                                +                                           -- SparseTensor.
                                +                 -> Tensor v'2 Data.Int.Int64 -- ^ __input_shape__: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
                                +                 -> Tensor v'3 Data.Int.Int64 -- ^ __new_shape__: 1-D.  `R_out` vector with the requested new dense shape.
                                +                 -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64)
                                +                 -- ^ (__output_indices__, __output_shape__)
                                +                 --
                                +                 -- * __output_indices__: 2-D.  `N x R_out` matrix with the updated indices of non-empty
                                +                 -- values in the output SparseTensor.
                                +                 --
                                +                 -- * __output_shape__: 1-D.  `R_out` vector with the full dense shape of the output
                                +                 -- SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
                                +                 -- filled in.
                                +sparseReshape = sparseReshape' id
                                +sparseReshape' :: OpParams ->
                                +                  Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
                                +                                            -- SparseTensor.
                                +                  -> Tensor v'2 Data.Int.Int64 -- ^ __input_shape__: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
                                +                  -> Tensor v'3 Data.Int.Int64 -- ^ __new_shape__: 1-D.  `R_out` vector with the requested new dense shape.
                                +                  -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64)
                                +                  -- ^ (__output_indices__, __output_shape__)
                                +                  --
                                +                  -- * __output_indices__: 2-D.  `N x R_out` matrix with the updated indices of non-empty
                                +                  -- values in the output SparseTensor.
                                +                  --
                                +                  -- * __output_shape__: 1-D.  `R_out` vector with the full dense shape of the output
                                +                  -- SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
                                +                  -- filled in.
                                +sparseReshape' op'options input_indices input_shape
                                +               new_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices,
                                +                                                             buildInputs input_shape,
                                +                                                             buildInputs new_shape]
                                +        return (opDef "SparseReshape"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input_indices"
                                +  description: "2-D.  `N x R_in` matrix with the indices of non-empty values in a\nSparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "input_shape"
                                +  description: "1-D.  `R_in` vector with the input SparseTensor\'s dense shape."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "new_shape"
                                +  description: "1-D.  `R_out` vector with the requested new dense shape."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_indices"
                                +  description: "2-D.  `N x R_out` matrix with the updated indices of non-empty\nvalues in the output SparseTensor."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_shape"
                                +  description: "1-D.  `R_out` vector with the full dense shape of the output\nSparseTensor.  This is the same as `new_shape` but with any -1 dimensions\nfilled in."
                                +  type: DT_INT64
                                +}
                                +-}
                                +
                                +-- | Computes the mean along sparse segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
                                +-- dimension, selecting a subset of dimension 0, specified by `indices`.
                                +sparseSegmentMean :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t,
                                +                                                  OneOf '[Data.Int.Int32,
                                +                                                          Data.Int.Int64] tidx) =>
                                +                     
                                +                     Tensor v'1 t -- ^ __data__
                                +                     -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
                                +                     -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
                                +                     -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                     -- has size `k`, the number of segments.
                                +sparseSegmentMean = sparseSegmentMean' id
                                +sparseSegmentMean' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t,
                                +                                                   OneOf '[Data.Int.Int32,
                                +                                                           Data.Int.Int64] tidx) =>
                                +                      OpParams ->
                                +                      Tensor v'1 t -- ^ __data__
                                +                      -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
                                +                      -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
                                +                      -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                      -- has size `k`, the number of segments.
                                +sparseSegmentMean' op'options data' indices segment_ids | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs indices,
                                +                                                             buildInputs segment_ids]
                                +        return (opDef "SparseSegmentMean"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "indices"
                                +  description: "A 1-D tensor. Has same rank as `segment_ids`."
                                +  type_attr: "Tidx"
                                +}
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor. Values should be sorted and can be repeated."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes gradients for SparseSegmentMean.
                                +--
                                +-- Returns tensor "output" with same shape as grad, except for dimension 0 whose
                                +-- value is output_dim0.
                                +sparseSegmentMeanGrad :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double,
                                +                                                                  Float] t,
                                +                                                          OneOf '[Data.Int.Int32,
                                +                                                                  Data.Int.Int64] tidx) =>
                                +                         
                                +                         Tensor v'1 t -- ^ __grad__: gradient propagated to the SparseSegmentMean op.
                                +                         -> Tensor v'2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentMean op.
                                +                         -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentMean op.
                                +                         -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentMean op.
                                +                         -> Tensor Build t -- ^ __output__
                                +sparseSegmentMeanGrad = sparseSegmentMeanGrad' id
                                +sparseSegmentMeanGrad' :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double,
                                +                                                                   Float] t,
                                +                                                           OneOf '[Data.Int.Int32,
                                +                                                                   Data.Int.Int64] tidx) =>
                                +                          OpParams ->
                                +                          Tensor v'1 t -- ^ __grad__: gradient propagated to the SparseSegmentMean op.
                                +                          -> Tensor v'2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentMean op.
                                +                          -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentMean op.
                                +                          -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentMean op.
                                +                          -> Tensor Build t -- ^ __output__
                                +sparseSegmentMeanGrad' op'options grad indices segment_ids
                                +                       output_dim0 | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs segment_ids,
                                +                                                             buildInputs output_dim0]
                                +        return (opDef "SparseSegmentMeanGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "grad"
                                +  description: "gradient propagated to the SparseSegmentMean op."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "indices passed to the corresponding SparseSegmentMean op."
                                +  type_attr: "Tidx"
                                +}
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "segment_ids passed to the corresponding SparseSegmentMean op."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "output_dim0"
                                +  description: "dimension 0 of \"data\" passed to SparseSegmentMean op."
                                +  type: DT_INT32
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the sum along sparse segments of a tensor divided by the sqrt of N.
                                +--
                                +-- N is the size of the segment being reduced.
                                +-- 
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +sparseSegmentSqrtN :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t,
                                +                                                   OneOf '[Data.Int.Int32,
                                +                                                           Data.Int.Int64] tidx) =>
                                +                      
                                +                      Tensor v'1 t -- ^ __data__
                                +                      -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
                                +                      -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
                                +                      -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                      -- has size `k`, the number of segments.
                                +sparseSegmentSqrtN = sparseSegmentSqrtN' id
                                +sparseSegmentSqrtN' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t,
                                +                                                    OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] tidx) =>
                                +                       OpParams ->
                                +                       Tensor v'1 t -- ^ __data__
                                +                       -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
                                +                       -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
                                +                       -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                       -- has size `k`, the number of segments.
                                +sparseSegmentSqrtN' op'options data' indices segment_ids | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs indices,
                                +                                                             buildInputs segment_ids]
                                +        return (opDef "SparseSegmentSqrtN"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "indices"
                                +  description: "A 1-D tensor. Has same rank as `segment_ids`."
                                +  type_attr: "Tidx"
                                +}
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor. Values should be sorted and can be repeated."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes gradients for SparseSegmentSqrtN.
                                +--
                                +-- Returns tensor "output" with same shape as grad, except for dimension 0 whose
                                +-- value is output_dim0.
                                +sparseSegmentSqrtNGrad :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double,
                                +                                                                   Float] t,
                                +                                                           OneOf '[Data.Int.Int32,
                                +                                                                   Data.Int.Int64] tidx) =>
                                +                          
                                +                          Tensor v'1 t -- ^ __grad__: gradient propagated to the SparseSegmentSqrtN op.
                                +                          -> Tensor v'2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentSqrtN op.
                                +                          -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentSqrtN op.
                                +                          -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentSqrtN op.
                                +                          -> Tensor Build t -- ^ __output__
                                +sparseSegmentSqrtNGrad = sparseSegmentSqrtNGrad' id
                                +sparseSegmentSqrtNGrad' :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double,
                                +                                                                    Float] t,
                                +                                                            OneOf '[Data.Int.Int32,
                                +                                                                    Data.Int.Int64] tidx) =>
                                +                           OpParams ->
                                +                           Tensor v'1 t -- ^ __grad__: gradient propagated to the SparseSegmentSqrtN op.
                                +                           -> Tensor v'2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentSqrtN op.
                                +                           -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentSqrtN op.
                                +                           -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentSqrtN op.
                                +                           -> Tensor Build t -- ^ __output__
                                +sparseSegmentSqrtNGrad' op'options grad indices segment_ids
                                +                        output_dim0 | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grad,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs segment_ids,
                                +                                                             buildInputs output_dim0]
                                +        return (opDef "SparseSegmentSqrtNGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "grad"
                                +  description: "gradient propagated to the SparseSegmentSqrtN op."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "indices passed to the corresponding SparseSegmentSqrtN op."
                                +  type_attr: "Tidx"
                                +}
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "segment_ids passed to the corresponding SparseSegmentSqrtN op."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "output_dim0"
                                +  description: "dimension 0 of \"data\" passed to SparseSegmentSqrtN op."
                                +  type: DT_INT32
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the sum along sparse segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
                                +-- dimension, selecting a subset of dimension 0, specified by `indices`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```python
                                +-- c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
                                +-- 
                                +-- # Select two rows, one segment.
                                +-- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
                                +-- # => [[0 0 0 0]]
                                +-- 
                                +-- # Select two rows, two segment.
                                +-- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
                                +-- # => [[ 1  2  3  4]
                                +-- #     [-1 -2 -3 -4]]
                                +-- 
                                +-- # Select all rows, two segments.
                                +-- tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
                                +-- # => [[0 0 0 0]
                                +-- #     [5 6 7 8]]
                                +-- 
                                +-- # Which is equivalent to:
                                +-- tf.segment_sum(c, tf.constant([0, 0, 1]))
                                +-- ```
                                +sparseSegmentSum :: forall v'1 v'2 v'3 t tidx . (OneOf '[Data.Int.Int16,
                                +                                                         Data.Int.Int32,
                                +                                                         Data.Int.Int64,
                                +                                                         Data.Int.Int8,
                                +                                                         Data.Word.Word16,
                                +                                                         Data.Word.Word8,
                                +                                                         Double, Float] t,
                                +                                                 OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int64] tidx) =>
                                +                    
                                +                    Tensor v'1 t -- ^ __data__
                                +                    -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
                                +                    -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
                                +                    -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                    -- has size `k`, the number of segments.
                                +sparseSegmentSum = sparseSegmentSum' id
                                +sparseSegmentSum' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Data.Int.Int16,
                                +                                                          Data.Int.Int32,
                                +                                                          Data.Int.Int64,
                                +                                                          Data.Int.Int8,
                                +                                                          Data.Word.Word16,
                                +                                                          Data.Word.Word8,
                                +                                                          Double, Float] t,
                                +                                                  OneOf '[Data.Int.Int32,
                                +                                                          Data.Int.Int64] tidx) =>
                                +                     OpParams ->
                                +                     Tensor v'1 t -- ^ __data__
                                +                     -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
                                +                     -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
                                +                     -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                     -- has size `k`, the number of segments.
                                +sparseSegmentSum' op'options data' indices segment_ids | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs indices,
                                +                                                             buildInputs segment_ids]
                                +        return (opDef "SparseSegmentSum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "indices"
                                +  description: "A 1-D tensor. Has same rank as `segment_ids`."
                                +  type_attr: "Tidx"
                                +}
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor. Values should be sorted and can be repeated."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Slice a `SparseTensor` based on the `start` and `size`.
                                +--
                                +-- For example, if the input is
                                +-- 
                                +--     input_tensor = shape = [2, 7]
                                +--     [    a   d e  ]
                                +--     [b c          ]
                                +-- 
                                +-- Graphically the output tensors are:
                                +-- 
                                +--     sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
                                +--     [    a  ]
                                +--     [b c    ]
                                +-- 
                                +--     sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
                                +--     [ d e  ]
                                +--     [      ]
                                +sparseSlice :: forall v'1 v'2 v'3 v'4 v'5 t . (TensorType t) => 
                                +               Tensor v'1 Data.Int.Int64 -- ^ __indices__: 2-D tensor represents the indices of the sparse tensor.
                                +               -> Tensor v'2 t -- ^ __values__: 1-D tensor represents the values of the sparse tensor.
                                +               -> Tensor v'3 Data.Int.Int64 -- ^ __shape__: 1-D. tensor represents the shape of the sparse tensor.
                                +               -> Tensor v'4 Data.Int.Int64 -- ^ __start__: 1-D. tensor represents the start of the slice.
                                +               -> Tensor v'5 Data.Int.Int64 -- ^ __size__: 1-D. tensor represents the size of the slice.
                                +                                            -- output indices: A list of 1-D tensors represents the indices of the output
                                +                                            -- sparse tensors.
                                +               -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                   Tensor Build Data.Int.Int64)
                                +               -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +               --
                                +               -- * __output_indices__
                                +               --
                                +               -- * __output_values__: A list of 1-D tensors represents the values of the output sparse
                                +               -- tensors.
                                +               --
                                +               -- * __output_shape__: A list of 1-D tensors represents the shape of the output sparse
                                +               -- tensors.
                                +sparseSlice = sparseSlice' id
                                +sparseSlice' :: forall v'1 v'2 v'3 v'4 v'5 t . (TensorType t) => OpParams ->
                                +                Tensor v'1 Data.Int.Int64 -- ^ __indices__: 2-D tensor represents the indices of the sparse tensor.
                                +                -> Tensor v'2 t -- ^ __values__: 1-D tensor represents the values of the sparse tensor.
                                +                -> Tensor v'3 Data.Int.Int64 -- ^ __shape__: 1-D. tensor represents the shape of the sparse tensor.
                                +                -> Tensor v'4 Data.Int.Int64 -- ^ __start__: 1-D. tensor represents the start of the slice.
                                +                -> Tensor v'5 Data.Int.Int64 -- ^ __size__: 1-D. tensor represents the size of the slice.
                                +                                             -- output indices: A list of 1-D tensors represents the indices of the output
                                +                                             -- sparse tensors.
                                +                -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                    Tensor Build Data.Int.Int64)
                                +                -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                --
                                +                -- * __output_indices__
                                +                --
                                +                -- * __output_values__: A list of 1-D tensors represents the values of the output sparse
                                +                -- tensors.
                                +                --
                                +                -- * __output_shape__: A list of 1-D tensors represents the shape of the output sparse
                                +                -- tensors.
                                +sparseSlice' op'options indices values shape start size | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices,
                                +                                                             buildInputs values,
                                +                                                             buildInputs shape,
                                +                                                             buildInputs start,
                                +                                                             buildInputs size]
                                +        return (opDef "SparseSlice"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "indices"
                                +  description: "2-D tensor represents the indices of the sparse tensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "1-D tensor represents the values of the sparse tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "shape"
                                +  description: "1-D. tensor represents the shape of the sparse tensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "start"
                                +  description: "1-D. tensor represents the start of the slice."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "size"
                                +  description: "1-D. tensor represents the size of the slice.\noutput indices: A list of 1-D tensors represents the indices of the output\nsparse tensors."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "output_indices" type: DT_INT64 }
                                +output_arg {
                                +  name: "output_values"
                                +  description: "A list of 1-D tensors represents the values of the output sparse\ntensors."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_shape"
                                +  description: "A list of 1-D tensors represents the shape of the output sparse\ntensors."
                                +  type: DT_INT64
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Applies softmax to a batched N-D `SparseTensor`.
                                +--
                                +-- The inputs represent an N-D SparseTensor  with logical shape `[..., B, C]`
                                +-- (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
                                +-- 
                                +-- This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
                                +-- logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
                                +-- zero elements do not participate*.  Specifically, the algorithm is equivalent
                                +-- to the following:
                                +-- 
                                +--   (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
                                +--       with shape `[B, C]`, along the size-C dimension;
                                +--   (2) Masks out the original implicitly-zero locations;
                                +--   (3) Renormalizes the remaining elements.
                                +-- 
                                +-- Hence, the `SparseTensor` result has exactly the same non-zero indices and
                                +-- shape.
                                +sparseSoftmax :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => 
                                +                 Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
                                +                                           -- SparseTensor, in canonical ordering.
                                +                 -> Tensor v'2 t -- ^ __sp_values__: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
                                +                 -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
                                +                 -> Tensor Build t -- ^ __output__: 1-D.  The `NNZ` values for the result `SparseTensor`.
                                +sparseSoftmax = sparseSoftmax' id
                                +sparseSoftmax' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) =>
                                +                  OpParams ->
                                +                  Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
                                +                                            -- SparseTensor, in canonical ordering.
                                +                  -> Tensor v'2 t -- ^ __sp_values__: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
                                +                  -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
                                +                  -> Tensor Build t -- ^ __output__: 1-D.  The `NNZ` values for the result `SparseTensor`.
                                +sparseSoftmax' op'options sp_indices sp_values sp_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices,
                                +                                                             buildInputs sp_values,
                                +                                                             buildInputs sp_shape]
                                +        return (opDef "SparseSoftmax"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sp_indices"
                                +  description: "2-D.  `NNZ x R` matrix with the indices of non-empty values in a\nSparseTensor, in canonical ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "sp_values"
                                +  description: "1-D.  `NNZ` non-empty values corresponding to `sp_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "sp_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "1-D.  The `NNZ` values for the result `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Computes softmax cross entropy cost and gradients to backpropagate.
                                +--
                                +-- Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
                                +-- a matrix of label probabilities, but rather a single label per row
                                +-- of features.  This label is considered to have probability 1.0 for the
                                +-- given row.
                                +-- 
                                +-- Inputs are the logits, not probabilities.
                                +sparseSoftmaxCrossEntropyWithLogits :: forall v'1 v'2 t
                                +                                       tlabels . (OneOf '[Data.Word.Word16,
                                +                                                          Double, Float] t,
                                +                                                  OneOf '[Data.Int.Int32,
                                +                                                          Data.Int.Int64] tlabels) =>
                                +                                       
                                +                                       Tensor v'1 t -- ^ __features__: batch_size x num_classes matrix
                                +                                       -> Tensor v'2 tlabels -- ^ __labels__: batch_size vector with values in [0, num_classes).
                                +                                                             -- This is the label for the given minibatch entry.
                                +                                       -> (Tensor Build t, Tensor Build t)
                                +                                       -- ^ (__loss__, __backprop__)
                                +                                       --
                                +                                       -- * __loss__: Per example loss (batch_size vector).
                                +                                       --
                                +                                       -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).
                                +sparseSoftmaxCrossEntropyWithLogits = sparseSoftmaxCrossEntropyWithLogits' id
                                +sparseSoftmaxCrossEntropyWithLogits' :: forall v'1 v'2 t
                                +                                        tlabels . (OneOf '[Data.Word.Word16,
                                +                                                           Double, Float] t,
                                +                                                   OneOf '[Data.Int.Int32,
                                +                                                           Data.Int.Int64] tlabels) =>
                                +                                        OpParams ->
                                +                                        Tensor v'1 t -- ^ __features__: batch_size x num_classes matrix
                                +                                        -> Tensor v'2 tlabels -- ^ __labels__: batch_size vector with values in [0, num_classes).
                                +                                                              -- This is the label for the given minibatch entry.
                                +                                        -> (Tensor Build t, Tensor Build t)
                                +                                        -- ^ (__loss__, __backprop__)
                                +                                        --
                                +                                        -- * __loss__: Per example loss (batch_size vector).
                                +                                        --
                                +                                        -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).
                                +sparseSoftmaxCrossEntropyWithLogits' op'options features
                                +                                     labels | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features,
                                +                                                             buildInputs labels]
                                +        return (opDef "SparseSoftmaxCrossEntropyWithLogits"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tlabels" .~ tensorType (undefined :: tlabels)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "features"
                                +  description: "batch_size x num_classes matrix"
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "labels"
                                +  description: "batch_size vector with values in [0, num_classes).\nThis is the label for the given minibatch entry."
                                +  type_attr: "Tlabels"
                                +}
                                +output_arg {
                                +  name: "loss"
                                +  description: "Per example loss (batch_size vector)."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "backprop"
                                +  description: "backpropagated gradients (batch_size x num_classes matrix)."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "Tlabels"
                                +  type: "type"
                                +  default_value { type: DT_INT64 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Returns the element-wise max of two SparseTensors.
                                +--
                                +-- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
                                +sparseSparseMaximum :: forall v'1 v'2 v'3 v'4 v'5 v'6
                                +                       t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => 
                                +                       Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                 -- SparseTensor, in the canonical lexicographic ordering.
                                +                       -> Tensor v'2 t -- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.
                                +                       -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand.
                                +                       -> Tensor v'5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.
                                +                       -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.
                                +                       -> (Tensor Build Data.Int.Int64, Tensor Build t)
                                +                       -- ^ (__output_indices__, __output_values__)
                                +                       --
                                +                       -- * __output_indices__: 2-D.  The indices of the output SparseTensor.
                                +                       --
                                +                       -- * __output_values__: 1-D.  The values of the output SparseTensor.
                                +sparseSparseMaximum = sparseSparseMaximum' id
                                +sparseSparseMaximum' :: forall v'1 v'2 v'3 v'4 v'5 v'6
                                +                        t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => OpParams ->
                                +                        Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                  -- SparseTensor, in the canonical lexicographic ordering.
                                +                        -> Tensor v'2 t -- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.
                                +                        -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.
                                +                        -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand.
                                +                        -> Tensor v'5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.
                                +                        -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.
                                +                        -> (Tensor Build Data.Int.Int64, Tensor Build t)
                                +                        -- ^ (__output_indices__, __output_values__)
                                +                        --
                                +                        -- * __output_indices__: 2-D.  The indices of the output SparseTensor.
                                +                        --
                                +                        -- * __output_values__: 1-D.  The values of the output SparseTensor.
                                +sparseSparseMaximum' op'options a_indices a_values a_shape b_indices b_values
                                +                     b_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices,
                                +                                                             buildInputs a_values,
                                +                                                             buildInputs a_shape,
                                +                                                             buildInputs b_indices,
                                +                                                             buildInputs b_values,
                                +                                                             buildInputs b_shape]
                                +        return (opDef "SparseSparseMaximum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "a_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "a_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `a_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "a_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "b_indices"
                                +  description: "counterpart to `a_indices` for the other operand."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "b_values"
                                +  description: "counterpart to `a_values` for the other operand; must be of the same dtype."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "b_shape"
                                +  description: "counterpart to `a_shape` for the other operand; the two shapes must be equal."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_indices"
                                +  description: "2-D.  The indices of the output SparseTensor."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_values"
                                +  description: "1-D.  The values of the output SparseTensor."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns the element-wise min of two SparseTensors.
                                +--
                                +-- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
                                +sparseSparseMinimum :: forall v'1 v'2 v'3 v'4 v'5 v'6
                                +                       t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int16, Data.Int.Int32,
                                +                                    Data.Int.Int64, Data.Int.Int8,
                                +                                    Data.Word.Word16, Data.Word.Word8, Double,
                                +                                    Float] t) => 
                                +                       Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                 -- SparseTensor, in the canonical lexicographic ordering.
                                +                       -> Tensor v'2 t -- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.
                                +                       -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand.
                                +                       -> Tensor v'5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.
                                +                       -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.
                                +                       -> (Tensor Build Data.Int.Int64, Tensor Build t)
                                +                       -- ^ (__output_indices__, __output_values__)
                                +                       --
                                +                       -- * __output_indices__: 2-D.  The indices of the output SparseTensor.
                                +                       --
                                +                       -- * __output_values__: 1-D.  The values of the output SparseTensor.
                                +sparseSparseMinimum = sparseSparseMinimum' id
                                +sparseSparseMinimum' :: forall v'1 v'2 v'3 v'4 v'5 v'6
                                +                        t . (OneOf '[(Data.Complex.Complex Double),
                                +                                     (Data.Complex.Complex Float),
                                +                                     Data.Int.Int16, Data.Int.Int32,
                                +                                     Data.Int.Int64, Data.Int.Int8,
                                +                                     Data.Word.Word16, Data.Word.Word8, Double,
                                +                                     Float] t) => OpParams ->
                                +                        Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
                                +                                                  -- SparseTensor, in the canonical lexicographic ordering.
                                +                        -> Tensor v'2 t -- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.
                                +                        -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.
                                +                        -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand.
                                +                        -> Tensor v'5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.
                                +                        -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.
                                +                        -> (Tensor Build Data.Int.Int64, Tensor Build t)
                                +                        -- ^ (__output_indices__, __output_values__)
                                +                        --
                                +                        -- * __output_indices__: 2-D.  The indices of the output SparseTensor.
                                +                        --
                                +                        -- * __output_values__: 1-D.  The values of the output SparseTensor.
                                +sparseSparseMinimum' op'options a_indices a_values a_shape b_indices b_values
                                +                     b_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices,
                                +                                                             buildInputs a_values,
                                +                                                             buildInputs a_shape,
                                +                                                             buildInputs b_indices,
                                +                                                             buildInputs b_values,
                                +                                                             buildInputs b_shape]
                                +        return (opDef "SparseSparseMinimum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "a_indices"
                                +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "a_values"
                                +  description: "1-D.  `N` non-empty values corresponding to `a_indices`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "a_shape"
                                +  description: "1-D.  Shape of the input SparseTensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "b_indices"
                                +  description: "counterpart to `a_indices` for the other operand."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "b_values"
                                +  description: "counterpart to `a_values` for the other operand; must be of the same dtype."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "b_shape"
                                +  description: "counterpart to `a_shape` for the other operand; the two shapes must be equal."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_indices"
                                +  description: "2-D.  The indices of the output SparseTensor."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_values"
                                +  description: "1-D.  The values of the output SparseTensor."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Split a `SparseTensor` into `num_split` tensors along one dimension.
                                +--
                                +-- If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices
                                +-- `[0 : shape[split_dim] % num_split]` gets one extra dimension.
                                +-- For example, if `split_dim = 1` and `num_split = 2` and the input is
                                +-- 
                                +--     input_tensor = shape = [2, 7]
                                +--     [    a   d e  ]
                                +--     [b c          ]
                                +-- 
                                +-- Graphically the output tensors are:
                                +-- 
                                +--     output_tensor[0] = shape = [2, 4]
                                +--     [    a  ]
                                +--     [b c    ]
                                +-- 
                                +--     output_tensor[1] = shape = [2, 3]
                                +--     [ d e  ]
                                +--     [      ]
                                +sparseSplit :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => 
                                +               Data.Int.Int64 -- ^ __num_split__: The number of ways to split.
                                +               -> Tensor v'1 Data.Int.Int64 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
                                +                                            -- `[0, rank(shape))`.
                                +               -> Tensor v'2 Data.Int.Int64 -- ^ __indices__: 2-D tensor represents the indices of the sparse tensor.
                                +               -> Tensor v'3 t -- ^ __values__: 1-D tensor represents the values of the sparse tensor.
                                +               -> Tensor v'4 Data.Int.Int64 -- ^ __shape__: 1-D. tensor represents the shape of the sparse tensor.
                                +                                            -- output indices: A list of 1-D tensors represents the indices of the output
                                +                                            -- sparse tensors.
                                +               -> ([Tensor Build Data.Int.Int64], [Tensor Build t],
                                +                   [Tensor Build Data.Int.Int64])
                                +               -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +               --
                                +               -- * __output_indices__
                                +               --
                                +               -- * __output_values__: A list of 1-D tensors represents the values of the output sparse
                                +               -- tensors.
                                +               --
                                +               -- * __output_shape__: A list of 1-D tensors represents the shape of the output sparse
                                +               -- tensors.
                                +sparseSplit = sparseSplit' id
                                +sparseSplit' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams ->
                                +                Data.Int.Int64 -- ^ __num_split__: The number of ways to split.
                                +                -> Tensor v'1 Data.Int.Int64 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
                                +                                             -- `[0, rank(shape))`.
                                +                -> Tensor v'2 Data.Int.Int64 -- ^ __indices__: 2-D tensor represents the indices of the sparse tensor.
                                +                -> Tensor v'3 t -- ^ __values__: 1-D tensor represents the values of the sparse tensor.
                                +                -> Tensor v'4 Data.Int.Int64 -- ^ __shape__: 1-D. tensor represents the shape of the sparse tensor.
                                +                                             -- output indices: A list of 1-D tensors represents the indices of the output
                                +                                             -- sparse tensors.
                                +                -> ([Tensor Build Data.Int.Int64], [Tensor Build t],
                                +                    [Tensor Build Data.Int.Int64])
                                +                -- ^ (__output_indices__, __output_values__, __output_shape__)
                                +                --
                                +                -- * __output_indices__
                                +                --
                                +                -- * __output_values__: A list of 1-D tensors represents the values of the output sparse
                                +                -- tensors.
                                +                --
                                +                -- * __output_shape__: A list of 1-D tensors represents the shape of the output sparse
                                +                -- tensors.
                                +sparseSplit' op'options num_split split_dim indices values
                                +             shape | eqLengthGuard [] =
                                +    pureOp [num_split, num_split, num_split] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs split_dim,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs values,
                                +                                                             buildInputs shape]
                                +        return (opDef "SparseSplit"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "num_split" .~ num_split
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "split_dim"
                                +  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(shape))`."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "2-D tensor represents the indices of the sparse tensor."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "values"
                                +  description: "1-D tensor represents the values of the sparse tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "shape"
                                +  description: "1-D. tensor represents the shape of the sparse tensor.\noutput indices: A list of 1-D tensors represents the indices of the output\nsparse tensors."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output_indices" type: DT_INT64 number_attr: "num_split"
                                +}
                                +output_arg {
                                +  name: "output_values"
                                +  description: "A list of 1-D tensors represents the values of the output sparse\ntensors."
                                +  type_attr: "T"
                                +  number_attr: "num_split"
                                +}
                                +output_arg {
                                +  name: "output_shape"
                                +  description: "A list of 1-D tensors represents the shape of the output sparse\ntensors."
                                +  type: DT_INT64
                                +  number_attr: "num_split"
                                +}
                                +attr {
                                +  name: "num_split"
                                +  type: "int"
                                +  description: "The number of ways to split."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
                                +--
                                +-- This Op does not require `a_indices` be sorted in standard lexicographic order.
                                +sparseTensorDenseAdd :: forall v'1 v'2 v'3 v'4 t
                                +                        tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t,
                                +                                    OneOf '[Data.Int.Int32,
                                +                                            Data.Int.Int64] tindices) => 
                                +                        Tensor v'1 tindices -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
                                +                        -> Tensor v'2 t -- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
                                +                        -> Tensor v'3 tindices -- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
                                +                        -> Tensor v'4 t -- ^ __b__: `ndims`-D Tensor.  With shape `a_shape`.
                                +                        -> Tensor Build t -- ^ __output__
                                +sparseTensorDenseAdd = sparseTensorDenseAdd' id
                                +sparseTensorDenseAdd' :: forall v'1 v'2 v'3 v'4 t
                                +                         tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                             (Data.Complex.Complex Float),
                                +                                             Data.Int.Int16, Data.Int.Int32,
                                +                                             Data.Int.Int64, Data.Int.Int8,
                                +                                             Data.Word.Word16, Data.Word.Word8,
                                +                                             Double, Float] t,
                                +                                     OneOf '[Data.Int.Int32,
                                +                                             Data.Int.Int64] tindices) =>
                                +                         OpParams ->
                                +                         Tensor v'1 tindices -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
                                +                         -> Tensor v'2 t -- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
                                +                         -> Tensor v'3 tindices -- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
                                +                         -> Tensor v'4 t -- ^ __b__: `ndims`-D Tensor.  With shape `a_shape`.
                                +                         -> Tensor Build t -- ^ __output__
                                +sparseTensorDenseAdd' op'options a_indices a_values a_shape
                                +                      b | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices,
                                +                                                             buildInputs a_values,
                                +                                                             buildInputs a_shape,
                                +                                                             buildInputs b]
                                +        return (opDef "SparseTensorDenseAdd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "a_indices"
                                +  description: "2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "a_values"
                                +  description: "1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "a_shape"
                                +  description: "1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "b"
                                +  description: "`ndims`-D Tensor.  With shape `a_shape`."
                                +  type_attr: "T"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
                                +--
                                +-- No validity checking is performed on the indices of A.  However, the following
                                +-- input format is recommended for optimal behavior:
                                +-- 
                                +-- if adjoint_a == false:
                                +--   A should be sorted in lexicographically increasing order.  Use SparseReorder
                                +--   if you're not sure.
                                +-- if adjoint_a == true:
                                +--   A should be sorted in order of increasing dimension 1 (i.e., "column major"
                                +--   order instead of "row major" order).
                                +sparseTensorDenseMatMul :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t,
                                +                                                                OneOf '[Data.Int.Int32,
                                +                                                                        Data.Int.Int64] tindices) =>
                                +                           
                                +                           Tensor v'1 tindices -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
                                +                           -> Tensor v'2 t -- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
                                +                           -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
                                +                           -> Tensor v'4 t -- ^ __b__: 2-D.  A dense Matrix.
                                +                           -> Tensor Build t -- ^ __product__
                                +sparseTensorDenseMatMul = sparseTensorDenseMatMul' id
                                +sparseTensorDenseMatMul' :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t,
                                +                                                                 OneOf '[Data.Int.Int32,
                                +                                                                         Data.Int.Int64] tindices) =>
                                +                            OpParams ->
                                +                            Tensor v'1 tindices -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
                                +                            -> Tensor v'2 t -- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
                                +                            -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
                                +                            -> Tensor v'4 t -- ^ __b__: 2-D.  A dense Matrix.
                                +                            -> Tensor Build t -- ^ __product__
                                +sparseTensorDenseMatMul' op'options a_indices a_values a_shape
                                +                         b | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices,
                                +                                                             buildInputs a_values,
                                +                                                             buildInputs a_shape,
                                +                                                             buildInputs b]
                                +        return (opDef "SparseTensorDenseMatMul"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "a_indices"
                                +  description: "2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "a_values"
                                +  description: "1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "a_shape"
                                +  description: "1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "b" description: "2-D.  A dense Matrix." type_attr: "T"
                                +}
                                +output_arg { name: "product" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  default_value { type: DT_INT64 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "adjoint_a"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Use the adjoint of A in the matrix multiply.  If A is complex, this\nis transpose(conj(A)).  Otherwise it\'s transpose(A)."
                                +}
                                +attr {
                                +  name: "adjoint_b"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Use the adjoint of B in the matrix multiply.  If B is complex, this\nis transpose(conj(B)).  Otherwise it\'s transpose(B)."
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that splits a SparseTensor into elements row-wise.
                                +
                                +sparseTensorSliceDataset :: forall v'1 v'2 v'3 tvalues m' . (MonadBuild m',
                                +                                                             TensorType tvalues) =>
                                +                            
                                +                            Tensor v'1 Data.Int.Int64 -- ^ __indices__
                                +                            -> Tensor v'2 tvalues -- ^ __values__
                                +                            -> Tensor v'3 Data.Int.Int64 -- ^ __dense_shape__
                                +                            -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +sparseTensorSliceDataset = sparseTensorSliceDataset' id
                                +sparseTensorSliceDataset' :: forall v'1 v'2 v'3 tvalues m' . (MonadBuild m',
                                +                                                              TensorType tvalues) =>
                                +                             OpParams ->
                                +                             Tensor v'1 Data.Int.Int64 -- ^ __indices__
                                +                             -> Tensor v'2 tvalues -- ^ __values__
                                +                             -> Tensor v'3 Data.Int.Int64 -- ^ __dense_shape__
                                +                             -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +sparseTensorSliceDataset' op'options indices values
                                +                          dense_shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices,
                                +                                                             buildInputs values,
                                +                                                             buildInputs dense_shape]
                                +        buildOp [] (opDef "SparseTensorSliceDataset"
                                +                    & opAttr "Tvalues" .~ tensorType (undefined :: tvalues)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "indices" type: DT_INT64 }
                                +input_arg { name: "values" type_attr: "Tvalues" }
                                +input_arg { name: "dense_shape" type: DT_INT64 }
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr { name: "Tvalues" type: "type" }
                                +-}
                                +
                                +-- | Converts a sparse representation into a dense tensor.
                                +--
                                +-- Builds an array `dense` with shape `output_shape` such that
                                +-- 
                                +-- ```
                                +-- # If sparse_indices is scalar
                                +-- dense[i] = (i == sparse_indices ? sparse_values : default_value)
                                +-- 
                                +-- # If sparse_indices is a vector, then for each i
                                +-- dense[sparse_indices[i]] = sparse_values[i]
                                +-- 
                                +-- # If sparse_indices is an n by d matrix, then for each i in [0, n)
                                +-- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
                                +-- ```
                                +-- 
                                +-- All other values in `dense` are set to `default_value`.  If `sparse_values` is a
                                +-- scalar, all sparse indices are set to this single value.
                                +-- 
                                +-- Indices should be sorted in lexicographic order, and indices must not
                                +-- contain any repeats. If `validate_indices` is true, these properties
                                +-- are checked during execution.
                                +sparseToDense :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t,
                                +                                                      OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] tindices) =>
                                +                 
                                +                 Tensor v'1 tindices -- ^ __sparse_indices__: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
                                +                                     -- index where `sparse_values[i]` will be placed.
                                +                 -> Tensor v'2 tindices -- ^ __output_shape__: 1-D.  Shape of the dense output tensor.
                                +                 -> Tensor v'3 t -- ^ __sparse_values__: 1-D.  Values corresponding to each row of `sparse_indices`,
                                +                                 -- or a scalar value to be used for all sparse indices.
                                +                 -> Tensor v'4 t -- ^ __default_value__: Scalar value to set for indices not specified in
                                +                                 -- `sparse_indices`.
                                +                 -> Tensor Build t -- ^ __dense__: Dense output tensor of shape `output_shape`.
                                +sparseToDense = sparseToDense' id
                                +sparseToDense' :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t,
                                +                                                       OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64] tindices) =>
                                +                  OpParams ->
                                +                  Tensor v'1 tindices -- ^ __sparse_indices__: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
                                +                                      -- index where `sparse_values[i]` will be placed.
                                +                  -> Tensor v'2 tindices -- ^ __output_shape__: 1-D.  Shape of the dense output tensor.
                                +                  -> Tensor v'3 t -- ^ __sparse_values__: 1-D.  Values corresponding to each row of `sparse_indices`,
                                +                                  -- or a scalar value to be used for all sparse indices.
                                +                  -> Tensor v'4 t -- ^ __default_value__: Scalar value to set for indices not specified in
                                +                                  -- `sparse_indices`.
                                +                  -> Tensor Build t -- ^ __dense__: Dense output tensor of shape `output_shape`.
                                +sparseToDense' op'options sparse_indices output_shape sparse_values
                                +               default_value | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices,
                                +                                                             buildInputs output_shape,
                                +                                                             buildInputs sparse_values,
                                +                                                             buildInputs default_value]
                                +        return (opDef "SparseToDense"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sparse_indices"
                                +  description: "0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete\nindex where `sparse_values[i]` will be placed."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "output_shape"
                                +  description: "1-D.  Shape of the dense output tensor."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg {
                                +  name: "sparse_values"
                                +  description: "1-D.  Values corresponding to each row of `sparse_indices`,\nor a scalar value to be used for all sparse indices."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "default_value"
                                +  description: "Scalar value to set for indices not specified in\n`sparse_indices`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "dense"
                                +  description: "Dense output tensor of shape `output_shape`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "validate_indices"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If true, indices are checked to make sure they are sorted in\nlexicographic order and that there are no repeats."
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Applies set operation along last dimension of 2 `SparseTensor` inputs.
                                +--
                                +-- See SetOperationOp::SetOperationFromContext for values of `set_operation`.
                                +-- 
                                +-- If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
                                +-- order and range of `set1` and `set2` indices.
                                +-- 
                                +-- Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
                                +-- and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
                                +-- as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
                                +-- ignored.
                                +-- 
                                +-- Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
                                +-- and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
                                +-- as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
                                +-- ignored.
                                +-- 
                                +-- If `validate_indices` is `True`, this op validates the order and range of `set1`
                                +-- and `set2` indices.
                                +-- 
                                +-- Output `result` is a `SparseTensor` represented by `result_indices`,
                                +-- `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
                                +-- has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
                                +-- dimension contains the result of `set_operation` applied to the corresponding
                                +-- `[0...n-1]` dimension of `set`.
                                +sparseToSparseSetOperation :: forall v'1 v'2 v'3 v'4 v'5 v'6
                                +                              t . (OneOf '[Data.ByteString.ByteString,
                                +                                           Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16,
                                +                                           Data.Word.Word8] t) => 
                                +                              Tensor v'1 Data.Int.Int64 -- ^ __set1_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
                                +                                                        -- order.
                                +                              -> Tensor v'2 t -- ^ __set1_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
                                +                                              -- order.
                                +                              -> Tensor v'3 Data.Int.Int64 -- ^ __set1_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
                                +                                                           -- be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
                                +                                                           -- max set size across `0...n-1` dimensions.
                                +                              -> Tensor v'4 Data.Int.Int64 -- ^ __set2_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
                                +                                                           -- order.
                                +                              -> Tensor v'5 t -- ^ __set2_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
                                +                                              -- order.
                                +                              -> Tensor v'6 Data.Int.Int64 -- ^ __set2_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
                                +                                                           -- be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
                                +                                                           -- max set size across `0...n-1` dimensions.
                                +                              -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                                  Tensor Build Data.Int.Int64)
                                +                              -- ^ (__result_indices__, __result_values__, __result_shape__)
                                +                              --
                                +                              -- * __result_indices__: 2D indices of a `SparseTensor`.
                                +                              --
                                +                              -- * __result_values__: 1D values of a `SparseTensor`.
                                +                              --
                                +                              -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
                                +                              -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
                                +                              -- is the max result set size across all `0...n-1` dimensions.
                                +sparseToSparseSetOperation = sparseToSparseSetOperation' id
                                +sparseToSparseSetOperation' :: forall v'1 v'2 v'3 v'4 v'5 v'6
                                +                               t . (OneOf '[Data.ByteString.ByteString,
                                +                                            Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16,
                                +                                            Data.Word.Word8] t) => OpParams ->
                                +                               Tensor v'1 Data.Int.Int64 -- ^ __set1_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
                                +                                                         -- order.
                                +                               -> Tensor v'2 t -- ^ __set1_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
                                +                                               -- order.
                                +                               -> Tensor v'3 Data.Int.Int64 -- ^ __set1_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
                                +                                                            -- be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
                                +                                                            -- max set size across `0...n-1` dimensions.
                                +                               -> Tensor v'4 Data.Int.Int64 -- ^ __set2_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
                                +                                                            -- order.
                                +                               -> Tensor v'5 t -- ^ __set2_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
                                +                                               -- order.
                                +                               -> Tensor v'6 Data.Int.Int64 -- ^ __set2_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
                                +                                                            -- be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
                                +                                                            -- max set size across `0...n-1` dimensions.
                                +                               -> (Tensor Build Data.Int.Int64, Tensor Build t,
                                +                                   Tensor Build Data.Int.Int64)
                                +                               -- ^ (__result_indices__, __result_values__, __result_shape__)
                                +                               --
                                +                               -- * __result_indices__: 2D indices of a `SparseTensor`.
                                +                               --
                                +                               -- * __result_values__: 1D values of a `SparseTensor`.
                                +                               --
                                +                               -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
                                +                               -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
                                +                               -- is the max result set size across all `0...n-1` dimensions.
                                +sparseToSparseSetOperation' op'options set1_indices set1_values set1_shape
                                +                            set2_indices set2_values
                                +                            set2_shape | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1_indices,
                                +                                                             buildInputs set1_values,
                                +                                                             buildInputs set1_shape,
                                +                                                             buildInputs set2_indices,
                                +                                                             buildInputs set2_values,
                                +                                                             buildInputs set2_shape]
                                +        return (opDef "SparseToSparseSetOperation"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "set1_indices"
                                +  description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "set1_values"
                                +  description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "set1_shape"
                                +  description: "1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must\nbe the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the\nmax set size across `0...n-1` dimensions."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "set2_indices"
                                +  description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "set2_values"
                                +  description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "set2_shape"
                                +  description: "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the\nmax set size across `0...n-1` dimensions."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "result_indices"
                                +  description: "2D indices of a `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "result_values"
                                +  description: "1D values of a `SparseTensor`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "result_shape"
                                +  description: "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions."
                                +  type: DT_INT64
                                +}
                                +attr { name: "set_operation" type: "string" }
                                +attr {
                                +  name: "validate_indices" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT8
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_STRING
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Splits a tensor into `num_split` tensors along one dimension.
                                +
                                +split :: forall v'1 v'2 t . (TensorType t) => 
                                +         Data.Int.Int64 -- ^ __num_split__: The number of ways to split.  Must evenly divide
                                +                        -- `value.shape[split_dim]`.
                                +         -> Tensor v'1 Data.Int.Int32 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
                                +                                      -- `[-rank(value), rank(value))`.
                                +         -> Tensor v'2 t -- ^ __value__: The tensor to split.
                                +         -> [Tensor Build t] -- ^ __output__: They are identically shaped tensors, whose shape matches that of `value`
                                +         -- except along `split_dim`, where their sizes are
                                +         -- `values.shape[split_dim] / num_split`.
                                +split = split' id
                                +split' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +          Data.Int.Int64 -- ^ __num_split__: The number of ways to split.  Must evenly divide
                                +                         -- `value.shape[split_dim]`.
                                +          -> Tensor v'1 Data.Int.Int32 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
                                +                                       -- `[-rank(value), rank(value))`.
                                +          -> Tensor v'2 t -- ^ __value__: The tensor to split.
                                +          -> [Tensor Build t] -- ^ __output__: They are identically shaped tensors, whose shape matches that of `value`
                                +          -- except along `split_dim`, where their sizes are
                                +          -- `values.shape[split_dim] / num_split`.
                                +split' op'options num_split split_dim value | eqLengthGuard [] =
                                +    pureOp [num_split] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs split_dim,
                                +                                                             buildInputs value]
                                +        return (opDef "Split"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "num_split" .~ num_split
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "split_dim"
                                +  description: "0-D.  The dimension along which to split.  Must be in the range\n`[-rank(value), rank(value))`."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "value" description: "The tensor to split." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "They are identically shaped tensors, whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`values.shape[split_dim] / num_split`."
                                +  type_attr: "T"
                                +  number_attr: "num_split"
                                +}
                                +attr {
                                +  name: "num_split"
                                +  type: "int"
                                +  description: "The number of ways to split.  Must evenly divide\n`value.shape[split_dim]`."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Splits a tensor into `num_split` tensors along one dimension.
                                +
                                +splitV :: forall v'1 v'2 v'3 t tlen . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] tlen) =>
                                +          
                                +          Data.Int.Int64 -- ^ __num_split__
                                +          -> Tensor v'1 t -- ^ __value__: The tensor to split.
                                +          -> Tensor v'2 tlen -- ^ __size_splits__: list containing the sizes of each output tensor along the split
                                +                             -- dimension. Must sum to the dimension of value along split_dim.
                                +                             -- Can contain one -1 indicating that dimension is to be inferred.
                                +          -> Tensor v'3 Data.Int.Int32 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
                                +                                       -- `[-rank(value), rank(value))`.
                                +          -> [Tensor Build t] -- ^ __output__: Tensors whose shape matches that of `value`
                                +          -- except along `split_dim`, where their sizes are
                                +          -- `size_splits[i]`.
                                +splitV = splitV' id
                                +splitV' :: forall v'1 v'2 v'3 t tlen . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] tlen) =>
                                +           OpParams ->
                                +           Data.Int.Int64 -- ^ __num_split__
                                +           -> Tensor v'1 t -- ^ __value__: The tensor to split.
                                +           -> Tensor v'2 tlen -- ^ __size_splits__: list containing the sizes of each output tensor along the split
                                +                              -- dimension. Must sum to the dimension of value along split_dim.
                                +                              -- Can contain one -1 indicating that dimension is to be inferred.
                                +           -> Tensor v'3 Data.Int.Int32 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
                                +                                        -- `[-rank(value), rank(value))`.
                                +           -> [Tensor Build t] -- ^ __output__: Tensors whose shape matches that of `value`
                                +           -- except along `split_dim`, where their sizes are
                                +           -- `size_splits[i]`.
                                +splitV' op'options num_split value size_splits split_dim | eqLengthGuard [] =
                                +    pureOp [num_split] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value,
                                +                                                             buildInputs size_splits,
                                +                                                             buildInputs split_dim]
                                +        return (opDef "SplitV"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tlen" .~ tensorType (undefined :: tlen)
                                +                & opAttr "num_split" .~ num_split
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value" description: "The tensor to split." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "size_splits"
                                +  description: "list containing the sizes of each output tensor along the split\ndimension. Must sum to the dimension of value along split_dim.\nCan contain one -1 indicating that dimension is to be inferred."
                                +  type_attr: "Tlen"
                                +}
                                +input_arg {
                                +  name: "split_dim"
                                +  description: "0-D.  The dimension along which to split.  Must be in the range\n`[-rank(value), rank(value))`."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Tensors whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`size_splits[i]`."
                                +  type_attr: "T"
                                +  number_attr: "num_split"
                                +}
                                +attr { name: "num_split" type: "int" has_minimum: true minimum: 1 }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tlen"
                                +  type: "type"
                                +  default_value { type: DT_INT64 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes square root of x element-wise.
                                +--
                                +-- I.e., \\(y = \sqrt{x} = x^{1/2}\\).
                                +sqrt :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +sqrt = sqrt' id
                                +sqrt' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +sqrt' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Sqrt"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient for the sqrt of `x` wrt its input.
                                +--
                                +-- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
                                +-- is the corresponding input gradient.
                                +sqrtGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Word.Word16, Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build t -- ^ __z__
                                +sqrtGrad = sqrtGrad' id
                                +sqrtGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Word.Word16, Double, Float] t) =>
                                +             OpParams ->
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 t -- ^ __y__
                                +             -> Tensor Build t -- ^ __z__
                                +sqrtGrad' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "SqrtGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes square of x element-wise.
                                +--
                                +-- I.e., \\(y = x * x = x^2\\).
                                +square :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                  (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                  Data.Int.Int64, Data.Word.Word16, Double,
                                +                                  Float] t) => 
                                +          Tensor v'1 t -- ^ __x__
                                +          -> Tensor Build t -- ^ __y__
                                +square = square' id
                                +square' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                   Data.Int.Int64, Data.Word.Word16, Double,
                                +                                   Float] t) => OpParams ->
                                +           Tensor v'1 t -- ^ __x__
                                +           -> Tensor Build t -- ^ __y__
                                +square' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Square"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns (x - y)(x - y) element-wise.
                                +--
                                +-- *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +squaredDifference :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                 (Data.Complex.Complex Float),
                                +                                                 Data.Int.Int32, Data.Int.Int64,
                                +                                                 Data.Word.Word16, Double,
                                +                                                 Float] t) => 
                                +                     Tensor v'1 t -- ^ __x__
                                +                     -> Tensor v'2 t -- ^ __y__
                                +                     -> Tensor Build t -- ^ __z__
                                +squaredDifference = squaredDifference' id
                                +squaredDifference' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                                  (Data.Complex.Complex Float),
                                +                                                  Data.Int.Int32,
                                +                                                  Data.Int.Int64,
                                +                                                  Data.Word.Word16, Double,
                                +                                                  Float] t) => OpParams ->
                                +                      Tensor v'1 t -- ^ __x__
                                +                      -> Tensor v'2 t -- ^ __y__
                                +                      -> Tensor Build t -- ^ __z__
                                +squaredDifference' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "SquaredDifference"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Removes dimensions of size 1 from the shape of a tensor.
                                +--
                                +-- Given a tensor `input`, this operation returns a tensor of the same type with
                                +-- all dimensions of size 1 removed. If you don't want to remove all size 1
                                +-- dimensions, you can remove specific size 1 dimensions by specifying
                                +-- `squeeze_dims`.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
                                +-- shape(squeeze(t)) ==> [2, 3]
                                +-- ```
                                +-- 
                                +-- Or, to remove specific size 1 dimensions:
                                +-- 
                                +-- ```
                                +-- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
                                +-- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
                                +-- ```
                                +squeeze :: forall v'1 t . (TensorType t) => 
                                +           Tensor v'1 t -- ^ __input__: The `input` to squeeze.
                                +           -> Tensor Build t -- ^ __output__: Contains the same data as `input`, but has one or more dimensions of
                                +           -- size 1 removed.
                                +squeeze = squeeze' id
                                +squeeze' :: forall v'1 t . (TensorType t) => OpParams ->
                                +            Tensor v'1 t -- ^ __input__: The `input` to squeeze.
                                +            -> Tensor Build t -- ^ __output__: Contains the same data as `input`, but has one or more dimensions of
                                +            -- size 1 removed.
                                +squeeze' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Squeeze"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The `input` to squeeze." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Contains the same data as `input`, but has one or more dimensions of\nsize 1 removed."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "squeeze_dims"
                                +  type: "list(int)"
                                +  default_value { list { } }
                                +  description: "If specified, only squeezes the dimensions listed. The dimension\nindex starts at 0. It is an error to squeeze a dimension that is not 1."
                                +  has_minimum: true
                                +}
                                +-}
                                +
                                +-- | Deprecated, use StackV2.
                                +
                                +stack :: forall m' . (MonadBuild m') => 
                                +         DataType -- ^ __elem_type__
                                +         -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__
                                +stack = stack' id
                                +stack' :: forall m' . (MonadBuild m') => OpParams ->
                                +          DataType -- ^ __elem_type__
                                +          -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__
                                +stack' op'options elem_type | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "Stack"
                                +                    & opAttr "elem_type" .~ elem_type
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "handle" type: DT_STRING is_ref: true }
                                +attr { name: "elem_type" type: "type" }
                                +attr { name: "stack_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Deprecated, use StackCloseV2.
                                +
                                +stackClose :: forall m' . (MonadBuild m') => 
                                +              Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +              -> m' (ControlNode)
                                +stackClose = stackClose' id
                                +stackClose' :: forall m' . (MonadBuild m') => OpParams ->
                                +               Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +               -> m' (ControlNode)
                                +stackClose' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "StackClose"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +-}
                                +
                                +-- | Delete the stack from its resource container.
                                +
                                +stackCloseV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a stack.
                                +                -> m' (ControlNode)
                                +stackCloseV2 = stackCloseV2' id
                                +stackCloseV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                 Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a stack.
                                +                 -> m' (ControlNode)
                                +stackCloseV2' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "StackCloseV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a stack."
                                +  type: DT_RESOURCE
                                +}
                                +-}
                                +
                                +-- | Deprecated, use StackPopV2.
                                +
                                +stackPop :: forall elem_type m' . (MonadBuild m', TensorType elem_type) => 
                                +            Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +            -> m' (Tensor Value elem_type) -- ^ __elem__
                                +stackPop = stackPop' id
                                +stackPop' :: forall elem_type m' . (MonadBuild m', TensorType elem_type) =>
                                +             OpParams ->
                                +             Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +             -> m' (Tensor Value elem_type) -- ^ __elem__
                                +stackPop' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "StackPop"
                                +                    & opAttr "elem_type" .~ tensorType (undefined :: elem_type)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +output_arg { name: "elem" type_attr: "elem_type" }
                                +attr { name: "elem_type" type: "type" }
                                +-}
                                +
                                +-- | Pop the element at the top of the stack.
                                +
                                +stackPopV2 :: forall v'1 elem_type m' . (MonadBuild m', TensorType elem_type) =>
                                +              
                                +              Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a stack.
                                +              -> m' (Tensor Value elem_type) -- ^ __elem__: The tensor that is popped from the top of the stack.
                                +stackPopV2 = stackPopV2' id
                                +stackPopV2' :: forall v'1 elem_type m' . (MonadBuild m',
                                +                                          TensorType elem_type) => OpParams ->
                                +               Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a stack.
                                +               -> m' (Tensor Value elem_type) -- ^ __elem__: The tensor that is popped from the top of the stack.
                                +stackPopV2' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "StackPopV2"
                                +                    & opAttr "elem_type" .~ tensorType (undefined :: elem_type)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a stack."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg {
                                +  name: "elem"
                                +  description: "The tensor that is popped from the top of the stack."
                                +  type_attr: "elem_type"
                                +}
                                +attr {
                                +  name: "elem_type"
                                +  type: "type"
                                +  description: "The type of the elem that is popped."
                                +}
                                +-}
                                +
                                +-- | Deprecated, use StackPushV2.
                                +
                                +stackPush :: forall v'2 t m' . (MonadBuild m', TensorType t) => 
                                +             Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +             -> Tensor v'2 t -- ^ __elem__
                                +             -> m' (Tensor Value t) -- ^ __output__
                                +stackPush = stackPush' id
                                +stackPush' :: forall v'2 t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +              Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +              -> Tensor v'2 t -- ^ __elem__
                                +              -> m' (Tensor Value t) -- ^ __output__
                                +stackPush' op'options handle elem | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs elem]
                                +        buildOp [] (opDef "StackPush"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "elem" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "swap_memory" type: "bool" default_value { b: false }
                                +}
                                +-}
                                +
                                +-- | Push an element onto the stack.
                                +
                                +stackPushV2 :: forall v'1 v'2 t m' . (MonadBuild m', TensorType t) => 
                                +               Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a stack.
                                +               -> Tensor v'2 t -- ^ __elem__: The tensor to be pushed onto the stack.
                                +               -> m' (Tensor Value t) -- ^ __output__: The same tensor as the input 'elem'.
                                +stackPushV2 = stackPushV2' id
                                +stackPushV2' :: forall v'1 v'2 t m' . (MonadBuild m', TensorType t) =>
                                +                OpParams ->
                                +                Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a stack.
                                +                -> Tensor v'2 t -- ^ __elem__: The tensor to be pushed onto the stack.
                                +                -> m' (Tensor Value t) -- ^ __output__: The same tensor as the input 'elem'.
                                +stackPushV2' op'options handle elem | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs elem]
                                +        buildOp [] (opDef "StackPushV2"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a stack."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "elem"
                                +  description: "The tensor to be pushed onto the stack."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The same tensor as the input \'elem\'."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "swap_memory"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "Swap `elem` to CPU. Default to false."
                                +}
                                +-}
                                +
                                +-- | A stack that produces elements in first-in last-out order.
                                +
                                +stackV2 :: forall v'1 m' . (MonadBuild m') => 
                                +           DataType -- ^ __elem_type__: The type of the elements on the stack.
                                +           -> Tensor v'1 Data.Int.Int32 -- ^ __max_size__: The maximum size of the stack if non-negative. If negative, the stack
                                +                                        -- size is unlimited.
                                +           -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the stack.
                                +stackV2 = stackV2' id
                                +stackV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +            DataType -- ^ __elem_type__: The type of the elements on the stack.
                                +            -> Tensor v'1 Data.Int.Int32 -- ^ __max_size__: The maximum size of the stack if non-negative. If negative, the stack
                                +                                         -- size is unlimited.
                                +            -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the stack.
                                +stackV2' op'options elem_type max_size | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs max_size]
                                +        buildOp [] (opDef "StackV2"
                                +                    & opAttr "elem_type" .~ elem_type
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "max_size"
                                +  description: "The maximum size of the stack if non-negative. If negative, the stack\nsize is unlimited."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the stack."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "elem_type"
                                +  type: "type"
                                +  description: "The type of the elements on the stack."
                                +}
                                +attr {
                                +  name: "stack_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "Overrides the name used for the temporary stack resource. Default\nvalue is the name of the \'Stack\' op (which is guaranteed unique)."
                                +}
                                +-}
                                +
                                +-- | Stage values similar to a lightweight Enqueue.
                                +--
                                +-- The basic functionality of this Op is similar to a queue with many
                                +-- fewer capabilities and options.  This Op is optimized for performance.
                                +stage :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => 
                                +         TensorList (v'1) dtypes -- ^ __values__: a list of tensors
                                +                                 -- dtypes A list of data types that inserted values should adhere to.
                                +         -> m' (ControlNode)
                                +stage = stage' id
                                +stage' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) =>
                                +          OpParams ->
                                +          TensorList (v'1) dtypes -- ^ __values__: a list of tensors
                                +                                  -- dtypes A list of data types that inserted values should adhere to.
                                +          -> m' (ControlNode)
                                +stage' op'options values | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values]
                                +        buildOp [] (opDef "Stage"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "values"
                                +  description: "a list of tensors\ndtypes A list of data types that inserted values should adhere to."
                                +  type_list_attr: "dtypes"
                                +}
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "The maximum number of bytes allowed for Tensors in the Staging Area.\nIf > 0, inserts will block until sufficient space is available."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "It is necessary to match this name to the matching Unstage Op."
                                +}
                                +-}
                                +
                                +-- | Op removes all elements in the underlying container.
                                +
                                +stageClear :: forall m' . (MonadBuild m') => 
                                +              [DataType] -- ^ __dtypes__
                                +              -> m' (ControlNode)
                                +stageClear = stageClear' id
                                +stageClear' :: forall m' . (MonadBuild m') => OpParams ->
                                +               [DataType] -- ^ __dtypes__
                                +               -> m' (ControlNode)
                                +stageClear' op'options dtypes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "StageClear"
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op peeks at the values at the specified index.  If the
                                +--
                                +-- underlying container does not contain sufficient elements
                                +-- this op will block until it does.   This Op is optimized for
                                +-- performance.
                                +stagePeek :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => 
                                +             Tensor v'1 Data.Int.Int32 -- ^ __index__
                                +             -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +stagePeek = stagePeek' id
                                +stagePeek' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) =>
                                +              OpParams ->
                                +              Tensor v'1 Data.Int.Int32 -- ^ __index__
                                +              -> m' (TensorList (Value) dtypes) -- ^ __values__
                                +stagePeek' op'options index | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs index]
                                +        buildOp [] (opDef "StagePeek"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "index" type: DT_INT32 }
                                +output_arg { name: "values" type_list_attr: "dtypes" }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Op returns the number of elements in the underlying container.
                                +
                                +stageSize :: forall m' . (MonadBuild m') => 
                                +             [DataType] -- ^ __dtypes__
                                +             -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +stageSize = stageSize' id
                                +stageSize' :: forall m' . (MonadBuild m') => OpParams ->
                                +              [DataType] -- ^ __dtypes__
                                +              -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +stageSize' op'options dtypes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "StageSize"
                                +                    & opAttr "dtypes" .~ dtypes
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "size" type: DT_INT32 }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr { name: "dtypes" type: "list(type)" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Outputs deterministic pseudorandom values from a normal distribution.
                                +--
                                +-- The generated values will have mean 0 and standard deviation 1.
                                +-- 
                                +-- The outputs are a deterministic function of `shape` and `seed`.
                                +statelessRandomNormal :: forall v'1 v'2 dtype t . (OneOf '[Data.Word.Word16,
                                +                                                           Double, Float] dtype,
                                +                                                   OneOf '[Data.Int.Int32,
                                +                                                           Data.Int.Int64] t) =>
                                +                         
                                +                         Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                         -> Tensor v'2 Data.Int.Int64 -- ^ __seed__: 2 seeds (shape [2]).
                                +                         -> Tensor Build dtype -- ^ __output__: Random values with specified shape.
                                +statelessRandomNormal = statelessRandomNormal' id
                                +statelessRandomNormal' :: forall v'1 v'2 dtype t . (OneOf '[Data.Word.Word16,
                                +                                                            Double,
                                +                                                            Float] dtype,
                                +                                                    OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] t) =>
                                +                          OpParams ->
                                +                          Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                          -> Tensor v'2 Data.Int.Int64 -- ^ __seed__: 2 seeds (shape [2]).
                                +                          -> Tensor Build dtype -- ^ __output__: Random values with specified shape.
                                +statelessRandomNormal' op'options shape seed | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape,
                                +                                                             buildInputs seed]
                                +        return (opDef "StatelessRandomNormal"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "The shape of the output tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "seed" description: "2 seeds (shape [2])." type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Random values with specified shape."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  description: "The type of the output."
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Outputs deterministic pseudorandom random values from a uniform distribution.
                                +--
                                +-- The generated values follow a uniform distribution in the range `[0, 1)`. The
                                +-- lower bound 0 is included in the range, while the upper bound 1 is excluded.
                                +-- 
                                +-- The outputs are a deterministic function of `shape` and `seed`.
                                +statelessRandomUniform :: forall v'1 v'2 dtype t . (OneOf '[Data.Word.Word16,
                                +                                                            Double,
                                +                                                            Float] dtype,
                                +                                                    OneOf '[Data.Int.Int32,
                                +                                                            Data.Int.Int64] t) =>
                                +                          
                                +                          Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                          -> Tensor v'2 Data.Int.Int64 -- ^ __seed__: 2 seeds (shape [2]).
                                +                          -> Tensor Build dtype -- ^ __output__: Random values with specified shape.
                                +statelessRandomUniform = statelessRandomUniform' id
                                +statelessRandomUniform' :: forall v'1 v'2 dtype t . (OneOf '[Data.Word.Word16,
                                +                                                             Double,
                                +                                                             Float] dtype,
                                +                                                     OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] t) =>
                                +                           OpParams ->
                                +                           Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                           -> Tensor v'2 Data.Int.Int64 -- ^ __seed__: 2 seeds (shape [2]).
                                +                           -> Tensor Build dtype -- ^ __output__: Random values with specified shape.
                                +statelessRandomUniform' op'options shape seed | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape,
                                +                                                             buildInputs seed]
                                +        return (opDef "StatelessRandomUniform"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "The shape of the output tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "seed" description: "2 seeds (shape [2])." type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Random values with specified shape."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  description: "The type of the output."
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Outputs deterministic pseudorandom values from a truncated normal distribution.
                                +--
                                +-- The generated values follow a normal distribution with mean 0 and standard
                                +-- deviation 1, except that values whose magnitude is more than 2 standard
                                +-- deviations from the mean are dropped and re-picked.
                                +-- 
                                +-- The outputs are a deterministic function of `shape` and `seed`.
                                +statelessTruncatedNormal :: forall v'1 v'2 dtype t . (OneOf '[Data.Word.Word16,
                                +                                                              Double,
                                +                                                              Float] dtype,
                                +                                                      OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] t) =>
                                +                            
                                +                            Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                            -> Tensor v'2 Data.Int.Int64 -- ^ __seed__: 2 seeds (shape [2]).
                                +                            -> Tensor Build dtype -- ^ __output__: Random values with specified shape.
                                +statelessTruncatedNormal = statelessTruncatedNormal' id
                                +statelessTruncatedNormal' :: forall v'1 v'2 dtype t . (OneOf '[Data.Word.Word16,
                                +                                                               Double,
                                +                                                               Float] dtype,
                                +                                                       OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64] t) =>
                                +                             OpParams ->
                                +                             Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                             -> Tensor v'2 Data.Int.Int64 -- ^ __seed__: 2 seeds (shape [2]).
                                +                             -> Tensor Build dtype -- ^ __output__: Random values with specified shape.
                                +statelessTruncatedNormal' op'options shape seed | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape,
                                +                                                             buildInputs seed]
                                +        return (opDef "StatelessTruncatedNormal"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "The shape of the output tensor."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "seed" description: "2 seeds (shape [2])." type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "Random values with specified shape."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  description: "The type of the output."
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Stops gradient computation.
                                +--
                                +-- When executed in a graph, this op outputs its input tensor as-is.
                                +-- 
                                +-- When building ops to compute gradients, this op prevents the contribution of
                                +-- its inputs to be taken into account.  Normally, the gradient generator adds ops
                                +-- to a graph to compute the derivatives of a specified 'loss' by recursively
                                +-- finding out inputs that contributed to its computation.  If you insert this op
                                +-- in the graph it inputs are masked from the gradient generator.  They are not
                                +-- taken into account for computing gradients.
                                +-- 
                                +-- This is useful any time you want to compute a value with TensorFlow but need
                                +-- to pretend that the value was a constant. Some examples include:
                                +-- 
                                +-- *  The *EM* algorithm where the *M-step* should not involve backpropagation
                                +--    through the output of the *E-step*.
                                +-- *  Contrastive divergence training of Boltzmann machines where, when
                                +--    differentiating the energy function, the training must not backpropagate
                                +--    through the graph that generated the samples from the model.
                                +-- *  Adversarial training, where no backprop should happen through the adversarial
                                +--    example generation process.
                                +stopGradient :: forall v'1 t . (TensorType t) => 
                                +                Tensor v'1 t -- ^ __input__
                                +                -> Tensor Build t -- ^ __output__
                                +stopGradient = stopGradient' id
                                +stopGradient' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                 Tensor v'1 t -- ^ __input__
                                +                 -> Tensor Build t -- ^ __output__
                                +stopGradient' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "StopGradient"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Return a strided slice from `input`.
                                +--
                                +-- Note, most python users will want to use the Python `Tensor.__getitem__`
                                +-- or `Variable.__getitem__` rather than this op directly.
                                +-- 
                                +-- The goal of this op is to produce a new tensor with a subset of
                                +-- the elements from the `n` dimensional `input` tensor. The subset is chosen using
                                +-- a sequence of `m` sparse range specifications encoded into the arguments
                                +-- of this function. Note, in some cases
                                +-- `m` could be equal to `n`, but this need not be the case. Each
                                +-- range specification entry can be one of the following:
                                +-- 
                                +-- - An ellipsis (...). Ellipses are used to imply zero or more
                                +--   dimensions of full-dimension selection and are produced using
                                +--   `ellipsis_mask`. For example, `foo[...]` is the identity slice.
                                +-- 
                                +-- - A new axis. This is used to insert a new shape=1 dimension and is
                                +--   produced using `new_axis_mask`. For example, `foo[:, ...]` where
                                +--   `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
                                +-- 
                                +-- 
                                +-- - A range `begin:end:stride`. This is used to specify how much to choose from
                                +--   a given dimension. `stride` can be any integer but 0.  `begin` is an integer
                                +--   which represents the index of the first value to select while `end` represents
                                +--   the index of the last value to select. The number of values selected in each
                                +--   dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
                                +--   `begin` and `end` can be negative where `-1` is the last element, `-2` is
                                +--   the second to last. `begin_mask` controls whether to replace the explicitly
                                +--   given `begin` with an implicit effective value of `0` if `stride > 0` and
                                +--   `-1` if `stride < 0`. `end_mask` is analogous but produces the number
                                +--   required to create the largest open interval. For example, given a shape
                                +--   `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
                                +--   not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
                                +--   and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
                                +--   first dimension of a tensor while dropping the last two (in the original
                                +--   order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
                                +-- 
                                +-- - A single index. This is used to keep only elements that have a given
                                +--   index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a
                                +--   shape `(6,)` tensor. This is encoded in `begin` and `end` and
                                +--   `shrink_axis_mask`.
                                +-- 
                                +-- Each conceptual range specification is encoded in the op's argument. This
                                +-- encoding is best understand by considering a non-trivial example. In
                                +-- particular,
                                +-- `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
                                +-- 
                                +-- ```
                                +-- begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
                                +-- end = [2, 4, x, x, -3, x]
                                +-- strides = [1, 1, x, x, -1, 1]
                                +-- begin_mask = 1<<4 | 1 << 5 = 48
                                +-- end_mask = 1<<5 = 32
                                +-- ellipsis_mask = 1<<3 = 8
                                +-- new_axis_mask = 1<<2 4
                                +-- shrink_axis_mask = 1<<0
                                +-- ```
                                +-- 
                                +-- In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
                                +-- the slice becomes (2, 1, 5, 5, 2, 5).
                                +-- Let us walk step by step through each argument specification.
                                +-- 
                                +-- 1.  The first argument in the example slice is turned into `begin = 1` and
                                +-- `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
                                +-- also set the appropriate bit in `shrink_axis_mask`.
                                +-- 
                                +-- 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have
                                +-- zero bits contributed.
                                +-- 
                                +-- 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
                                +-- dimension in the final shape. Dummy values are contributed to begin,
                                +-- end and stride, while the new_axis_mask bit is set.
                                +-- 
                                +-- 4. `...` grab the full ranges from as many dimensions as needed to
                                +-- fully specify a slice for every dimension of the input shape.
                                +-- 
                                +-- 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
                                +-- with a dimension that has shape `s` is converted to a positive index
                                +-- `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
                                +-- is done internally so begin, end and strides receive x, -3, and -1.
                                +-- The appropriate begin_mask bit is set to indicate the start range is the
                                +-- full range (ignoring the x).
                                +-- 
                                +-- 6. `:` indicates that the entire contents of the corresponding dimension
                                +-- is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
                                +-- receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
                                +-- `end_mask` are also set.
                                +-- 
                                +-- *Requirements*:
                                +--   `0 != strides[i] for i in [0, m)`
                                +--   `ellipsis_mask must be a power of two (only one ellipsis)`
                                +stridedSlice :: forall v'1 v'2 v'3 v'4 t index . (TensorType t,
                                +                                                  OneOf '[Data.Int.Int32,
                                +                                                          Data.Int.Int64] index) =>
                                +                
                                +                Tensor v'1 t -- ^ __input__
                                +                -> Tensor v'2 index -- ^ __begin__: `begin[k]` specifies the offset into the `k`th range specification.
                                +                                    -- The exact dimension this corresponds to will be determined by context.
                                +                                    -- Out-of-bounds values will be silently clamped. If the `k`th bit of
                                +                                    -- `begin_mask` then `begin[k]` is ignored and the full range of the
                                +                                    -- appropriate dimension is used instead. Negative values causes indexing
                                +                                    -- to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.
                                +                -> Tensor v'3 index -- ^ __end__: `end[i]` is like `begin` with the exception that `end_mask` is
                                +                                    -- used to determine full ranges.
                                +                -> Tensor v'4 index -- ^ __strides__: `strides[i]` specifies the increment in the `i`th specification
                                +                                    -- after extracting a given element. Negative indices will reverse
                                +                                    -- the original order. Out or range values are
                                +                                    -- clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
                                +                -> Tensor Build t -- ^ __output__
                                +stridedSlice = stridedSlice' id
                                +stridedSlice' :: forall v'1 v'2 v'3 v'4 t index . (TensorType t,
                                +                                                   OneOf '[Data.Int.Int32,
                                +                                                           Data.Int.Int64] index) =>
                                +                 OpParams ->
                                +                 Tensor v'1 t -- ^ __input__
                                +                 -> Tensor v'2 index -- ^ __begin__: `begin[k]` specifies the offset into the `k`th range specification.
                                +                                     -- The exact dimension this corresponds to will be determined by context.
                                +                                     -- Out-of-bounds values will be silently clamped. If the `k`th bit of
                                +                                     -- `begin_mask` then `begin[k]` is ignored and the full range of the
                                +                                     -- appropriate dimension is used instead. Negative values causes indexing
                                +                                     -- to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.
                                +                 -> Tensor v'3 index -- ^ __end__: `end[i]` is like `begin` with the exception that `end_mask` is
                                +                                     -- used to determine full ranges.
                                +                 -> Tensor v'4 index -- ^ __strides__: `strides[i]` specifies the increment in the `i`th specification
                                +                                     -- after extracting a given element. Negative indices will reverse
                                +                                     -- the original order. Out or range values are
                                +                                     -- clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
                                +                 -> Tensor Build t -- ^ __output__
                                +stridedSlice' op'options input begin end strides | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs begin,
                                +                                                             buildInputs end,
                                +                                                             buildInputs strides]
                                +        return (opDef "StridedSlice"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Index" .~ tensorType (undefined :: index)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg {
                                +  name: "begin"
                                +  description: "`begin[k]` specifies the offset into the `k`th range specification.\nThe exact dimension this corresponds to will be determined by context.\nOut-of-bounds values will be silently clamped. If the `k`th bit of\n`begin_mask` then `begin[k]` is ignored and the full range of the\nappropriate dimension is used instead. Negative values causes indexing\nto start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`."
                                +  type_attr: "Index"
                                +}
                                +input_arg {
                                +  name: "end"
                                +  description: "`end[i]` is like `begin` with the exception that `end_mask` is\nused to determine full ranges."
                                +  type_attr: "Index"
                                +}
                                +input_arg {
                                +  name: "strides"
                                +  description: "`strides[i]` specifies the increment in the `i`th specification\nafter extracting a given element. Negative indices will reverse\nthe original order. Out or range values are\nclamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`"
                                +  type_attr: "Index"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Index"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr {
                                +  name: "begin_mask"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "a bitmask where a bit i being 1 means to ignore the begin\nvalue and instead use the largest interval possible. At runtime\nbegin[i] will be replaced with `[0, n-1) if `stride[i] > 0` or\n`[-1, n-1]` if `stride[i] < 0`"
                                +}
                                +attr {
                                +  name: "end_mask"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "analogous to `begin_mask`"
                                +}
                                +attr {
                                +  name: "ellipsis_mask"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "a bitmask where bit `i` being 1 means the `i`th\nposition is actually an ellipsis. One bit at most can be 1.\nIf `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`\nis provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis\nimplicitly creates as many range specifications as necessary to fully\nspecify the sliced range for every dimension. For example for a 4-dimensional\ntensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`."
                                +}
                                +attr {
                                +  name: "new_axis_mask"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "a bitmask where bit `i` being 1 means the `i`th\nspecification creates a new shape 1 dimension. For example\n`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor."
                                +}
                                +attr {
                                +  name: "shrink_axis_mask"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "a bitmask where bit `i` implies that the `i`th\nspecification should shrink the dimensionality. begin and end\nmust imply a slice of size 1 in the dimension. For example in\npython one might do `foo[:, 3, :]` which would result in\n`shrink_axis_mask` being 2."
                                +}
                                +-}
                                +
                                +-- | Assign `value` to the sliced l-value reference of `ref`.
                                +--
                                +-- The values of `value` are assigned to the positions in the variable
                                +-- `ref` that are selected by the slice parameters. The slice parameters
                                +-- `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`.
                                +-- 
                                +-- NOTE this op currently does not support broadcasting and so `value`'s
                                +-- shape must be exactly the shape produced by the slice of `ref`.
                                +stridedSliceAssign :: forall v'2 v'3 v'4 v'5 t index m' . (MonadBuild m',
                                +                                                           TensorType t,
                                +                                                           OneOf '[Data.Int.Int32,
                                +                                                                   Data.Int.Int64] index) =>
                                +                      
                                +                      Tensor Ref t -- ^ __ref__
                                +                      -> Tensor v'2 index -- ^ __begin__
                                +                      -> Tensor v'3 index -- ^ __end__
                                +                      -> Tensor v'4 index -- ^ __strides__
                                +                      -> Tensor v'5 t -- ^ __value__
                                +                      -> m' (Tensor Ref t) -- ^ __output_ref__
                                +stridedSliceAssign = stridedSliceAssign' id
                                +stridedSliceAssign' :: forall v'2 v'3 v'4 v'5 t index m' . (MonadBuild m',
                                +                                                            TensorType t,
                                +                                                            OneOf '[Data.Int.Int32,
                                +                                                                    Data.Int.Int64] index) =>
                                +                       OpParams ->
                                +                       Tensor Ref t -- ^ __ref__
                                +                       -> Tensor v'2 index -- ^ __begin__
                                +                       -> Tensor v'3 index -- ^ __end__
                                +                       -> Tensor v'4 index -- ^ __strides__
                                +                       -> Tensor v'5 t -- ^ __value__
                                +                       -> m' (Tensor Ref t) -- ^ __output_ref__
                                +stridedSliceAssign' op'options ref begin end strides value | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref,
                                +                                                             buildInputs begin,
                                +                                                             buildInputs end,
                                +                                                             buildInputs strides,
                                +                                                             buildInputs value]
                                +        buildOp [] (opDef "StridedSliceAssign"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "Index" .~ tensorType (undefined :: index)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "ref" type_attr: "T" is_ref: true }
                                +input_arg { name: "begin" type_attr: "Index" }
                                +input_arg { name: "end" type_attr: "Index" }
                                +input_arg { name: "strides" type_attr: "Index" }
                                +input_arg { name: "value" type_attr: "T" }
                                +output_arg { name: "output_ref" type_attr: "T" is_ref: true }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Index"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr { name: "begin_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "end_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "new_axis_mask" type: "int" default_value { i: 0 } }
                                +attr {
                                +  name: "shrink_axis_mask" type: "int" default_value { i: 0 }
                                +}
                                +-}
                                +
                                +-- | Returns the gradient of `StridedSlice`.
                                +--
                                +-- Since `StridedSlice` cuts out pieces of its `input` which is size
                                +-- `shape`, its gradient will have the same shape (which is passed here
                                +-- as `shape`). The gradient will be zero in any element that the slice
                                +-- does not select.
                                +-- 
                                +-- Arguments are the same as StridedSliceGrad with the exception that
                                +-- `dy` is the input gradient to be propagated and `shape` is the
                                +-- shape of `StridedSlice`'s `input`.
                                +stridedSliceGrad :: forall v'1 v'2 v'3 v'4 v'5 t index . (TensorType t,
                                +                                                          OneOf '[Data.Int.Int32,
                                +                                                                  Data.Int.Int64] index) =>
                                +                    
                                +                    Tensor v'1 index -- ^ __shape__
                                +                    -> Tensor v'2 index -- ^ __begin__
                                +                    -> Tensor v'3 index -- ^ __end__
                                +                    -> Tensor v'4 index -- ^ __strides__
                                +                    -> Tensor v'5 t -- ^ __dy__
                                +                    -> Tensor Build t -- ^ __output__
                                +stridedSliceGrad = stridedSliceGrad' id
                                +stridedSliceGrad' :: forall v'1 v'2 v'3 v'4 v'5 t index . (TensorType t,
                                +                                                           OneOf '[Data.Int.Int32,
                                +                                                                   Data.Int.Int64] index) =>
                                +                     OpParams ->
                                +                     Tensor v'1 index -- ^ __shape__
                                +                     -> Tensor v'2 index -- ^ __begin__
                                +                     -> Tensor v'3 index -- ^ __end__
                                +                     -> Tensor v'4 index -- ^ __strides__
                                +                     -> Tensor v'5 t -- ^ __dy__
                                +                     -> Tensor Build t -- ^ __output__
                                +stridedSliceGrad' op'options shape begin end strides dy | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape,
                                +                                                             buildInputs begin,
                                +                                                             buildInputs end,
                                +                                                             buildInputs strides,
                                +                                                             buildInputs dy]
                                +        return (opDef "StridedSliceGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Index" .~ tensorType (undefined :: index)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "shape" type_attr: "Index" }
                                +input_arg { name: "begin" type_attr: "Index" }
                                +input_arg { name: "end" type_attr: "Index" }
                                +input_arg { name: "strides" type_attr: "Index" }
                                +input_arg { name: "dy" type_attr: "T" }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Index"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +attr { name: "begin_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "end_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } }
                                +attr { name: "new_axis_mask" type: "int" default_value { i: 0 } }
                                +attr {
                                +  name: "shrink_axis_mask" type: "int" default_value { i: 0 }
                                +}
                                +-}
                                +
                                +-- | Joins the strings in the given list of string tensors into one tensor;
                                +--
                                +-- with the given separator (default is an empty separator).
                                +stringJoin :: 
                                +              [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__: A list of string tensors.  The tensors must all have the same shape,
                                +                                                      -- or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
                                +                                                      -- of non-scalar inputs.
                                +              -> Tensor Build Data.ByteString.ByteString -- ^ __output__
                                +stringJoin = stringJoin' id
                                +stringJoin' :: OpParams ->
                                +               [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__: A list of string tensors.  The tensors must all have the same shape,
                                +                                                       -- or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
                                +                                                       -- of non-scalar inputs.
                                +               -> Tensor Build Data.ByteString.ByteString -- ^ __output__
                                +stringJoin' op'options
                                +            inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
                                +        return (opDef "StringJoin"
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length inputs) :: Int64
                                +{-
                                +input_arg {
                                +  name: "inputs"
                                +  description: "A list of string tensors.  The tensors must all have the same shape,\nor be scalars.  Scalars may be mixed in; these will be broadcast to the shape\nof non-scalar inputs."
                                +  type: DT_STRING
                                +  number_attr: "N"
                                +}
                                +output_arg { name: "output" type: DT_STRING }
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +attr {
                                +  name: "separator"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "string, an optional join separator."
                                +}
                                +-}
                                +
                                +-- | Split elements of `input` based on `delimiter` into a `SparseTensor`.
                                +--
                                +-- Let N be the size of source (typically N will be the batch size). Split each
                                +-- element of `input` based on `delimiter` and return a `SparseTensor`
                                +-- containing the splitted tokens. Empty tokens are ignored.
                                +-- 
                                +-- `delimiter` can be empty, or a string of split characters. If `delimiter` is an
                                +--  empty string, each element of `input` is split into individual single-byte
                                +--  character strings, including splitting of UTF-8 multibyte sequences. Otherwise
                                +--  every character of `delimiter` is a potential split point.
                                +-- 
                                +-- For example:
                                +--   N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
                                +--   will be
                                +-- 
                                +--   indices = [0, 0;
                                +--              0, 1;
                                +--              1, 0;
                                +--              1, 1;
                                +--              1, 2]
                                +--   shape = [2, 3]
                                +--   values = ['hello', 'world', 'a', 'b', 'c']
                                +stringSplit :: 
                                +               Tensor v'1 Data.ByteString.ByteString -- ^ __input__: 1-D. Strings to split.
                                +               -> Tensor v'2 Data.ByteString.ByteString -- ^ __delimiter__: 0-D. Delimiter characters (bytes), or empty string.
                                +               -> (Tensor Build Data.Int.Int64,
                                +                   Tensor Build Data.ByteString.ByteString,
                                +                   Tensor Build Data.Int.Int64)
                                +               -- ^ (__indices__, __values__, __shape__)
                                +               --
                                +               -- * __indices__: A dense matrix of int64 representing the indices of the sparse tensor.
                                +               --
                                +               -- * __values__: A vector of strings corresponding to the splited values.
                                +               --
                                +               -- * __shape__: a length-2 vector of int64 representing the shape of the sparse
                                +               -- tensor, where the first value is N and the second value is the maximum number
                                +               -- of tokens in a single input entry.
                                +stringSplit = stringSplit' id
                                +stringSplit' :: OpParams ->
                                +                Tensor v'1 Data.ByteString.ByteString -- ^ __input__: 1-D. Strings to split.
                                +                -> Tensor v'2 Data.ByteString.ByteString -- ^ __delimiter__: 0-D. Delimiter characters (bytes), or empty string.
                                +                -> (Tensor Build Data.Int.Int64,
                                +                    Tensor Build Data.ByteString.ByteString,
                                +                    Tensor Build Data.Int.Int64)
                                +                -- ^ (__indices__, __values__, __shape__)
                                +                --
                                +                -- * __indices__: A dense matrix of int64 representing the indices of the sparse tensor.
                                +                --
                                +                -- * __values__: A vector of strings corresponding to the splited values.
                                +                --
                                +                -- * __shape__: a length-2 vector of int64 representing the shape of the sparse
                                +                -- tensor, where the first value is N and the second value is the maximum number
                                +                -- of tokens in a single input entry.
                                +stringSplit' op'options input delimiter | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs delimiter]
                                +        return (opDef "StringSplit"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "1-D. Strings to split." type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "delimiter"
                                +  description: "0-D. Delimiter characters (bytes), or empty string."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "indices"
                                +  description: "A dense matrix of int64 representing the indices of the sparse tensor."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "values"
                                +  description: "A vector of strings corresponding to the splited values."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "shape"
                                +  description: "a length-2 vector of int64 representing the shape of the sparse\ntensor, where the first value is N and the second value is the maximum number\nof tokens in a single input entry."
                                +  type: DT_INT64
                                +}
                                +-}
                                +
                                +-- | Converts each string in the input Tensor to its hash mod by a number of buckets.
                                +--
                                +-- The hash function is deterministic on the content of the string within the
                                +-- process.
                                +-- 
                                +-- Note that the hash function may change from time to time.
                                +-- This functionality will be deprecated and it's recommended to use
                                +-- `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
                                +stringToHashBucket :: 
                                +                      Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
                                +                      -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__
                                +                      -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
                                +stringToHashBucket = stringToHashBucket' id
                                +stringToHashBucket' :: OpParams ->
                                +                       Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
                                +                       -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__
                                +                       -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
                                +stringToHashBucket' op'options num_buckets string_tensor | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_tensor]
                                +        return (opDef "StringToHashBucket"
                                +                & opAttr "num_buckets" .~ num_buckets
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "string_tensor" type: DT_STRING }
                                +output_arg {
                                +  name: "output"
                                +  description: "A Tensor of the same shape as the input `string_tensor`."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "num_buckets"
                                +  type: "int"
                                +  description: "The number of buckets."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Converts each string in the input Tensor to its hash mod by a number of buckets.
                                +--
                                +-- The hash function is deterministic on the content of the string within the
                                +-- process and will never change. However, it is not suitable for cryptography.
                                +-- This function may be used when CPU time is scarce and inputs are trusted or
                                +-- unimportant. There is a risk of adversaries constructing inputs that all hash
                                +-- to the same bucket. To prevent this problem, use a strong hash function with
                                +-- `tf.string_to_hash_bucket_strong`.
                                +stringToHashBucketFast :: 
                                +                          Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
                                +                          -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket.
                                +                          -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
                                +stringToHashBucketFast = stringToHashBucketFast' id
                                +stringToHashBucketFast' :: OpParams ->
                                +                           Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
                                +                           -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket.
                                +                           -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
                                +stringToHashBucketFast' op'options num_buckets input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "StringToHashBucketFast"
                                +                & opAttr "num_buckets" .~ num_buckets
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "The strings to assign a hash bucket."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A Tensor of the same shape as the input `string_tensor`."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "num_buckets"
                                +  type: "int"
                                +  description: "The number of buckets."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Converts each string in the input Tensor to its hash mod by a number of buckets.
                                +--
                                +-- The hash function is deterministic on the content of the string within the
                                +-- process. The hash function is a keyed hash function, where attribute `key`
                                +-- defines the key of the hash function. `key` is an array of 2 elements.
                                +-- 
                                +-- A strong hash is important when inputs may be malicious, e.g. URLs with
                                +-- additional components. Adversaries could try to make their inputs hash to the
                                +-- same bucket for a denial-of-service attack or to skew the results. A strong
                                +-- hash prevents this by making it difficult, if not infeasible, to compute inputs
                                +-- that hash to the same bucket. This comes at a cost of roughly 4x higher compute
                                +-- time than `tf.string_to_hash_bucket_fast`.
                                +stringToHashBucketStrong :: 
                                +                            Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
                                +                            -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket.
                                +                            -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
                                +stringToHashBucketStrong = stringToHashBucketStrong' id
                                +stringToHashBucketStrong' :: OpParams ->
                                +                             Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
                                +                             -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket.
                                +                             -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
                                +stringToHashBucketStrong' op'options num_buckets input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "StringToHashBucketStrong"
                                +                & opAttr "num_buckets" .~ num_buckets
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "The strings to assign a hash bucket."
                                +  type: DT_STRING
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A Tensor of the same shape as the input `string_tensor`."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "num_buckets"
                                +  type: "int"
                                +  description: "The number of buckets."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "key"
                                +  type: "list(int)"
                                +  description: "The key for the keyed hash function passed as a list of two uint64\nelements."
                                +}
                                +-}
                                +
                                +-- | Converts each string in the input Tensor to the specified numeric type.
                                +--
                                +-- (Note that int32 overflow results in an error while float overflow
                                +-- results in a rounded value.)
                                +stringToNumber :: forall v'1 out_type . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                                 Double, Float] out_type) => 
                                +                  Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__
                                +                  -> Tensor Build out_type -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
                                +stringToNumber = stringToNumber' id
                                +stringToNumber' :: forall v'1 out_type . (OneOf '[Data.Int.Int32,
                                +                                                  Data.Int.Int64, Double,
                                +                                                  Float] out_type) =>
                                +                   OpParams ->
                                +                   Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__
                                +                   -> Tensor Build out_type -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
                                +stringToNumber' op'options string_tensor | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_tensor]
                                +        return (opDef "StringToNumber"
                                +                & opAttr "out_type" .~ tensorType (undefined :: out_type)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "string_tensor" type: DT_STRING }
                                +output_arg {
                                +  name: "output"
                                +  description: "A Tensor of the same shape as the input `string_tensor`."
                                +  type_attr: "out_type"
                                +}
                                +attr {
                                +  name: "out_type"
                                +  type: "type"
                                +  default_value { type: DT_FLOAT }
                                +  description: "The numeric type to interpret each string in `string_tensor` as."
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns x - y element-wise.
                                +--
                                +-- *NOTE*: `Sub` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +sub :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                   (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                   Data.Int.Int64, Data.Word.Word16, Double,
                                +                                   Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor v'2 t -- ^ __y__
                                +       -> Tensor Build t -- ^ __z__
                                +sub = sub' id
                                +sub' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                    (Data.Complex.Complex Float),
                                +                                    Data.Int.Int32, Data.Int.Int64,
                                +                                    Data.Word.Word16, Double, Float] t) =>
                                +        OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor v'2 t -- ^ __y__
                                +        -> Tensor Build t -- ^ __z__
                                +sub' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "Sub"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Return substrings from `Tensor` of strings.
                                +--
                                +-- For each string in the input `Tensor`, creates a substring starting at index
                                +-- `pos` with a total length of `len`.
                                +-- 
                                +-- If `len` defines a substring that would extend beyond the length of the input
                                +-- string, then as many characters as possible are used.
                                +-- 
                                +-- If `pos` is negative or specifies a character index larger than any of the input
                                +-- strings, then an `InvalidArgumentError` is thrown.
                                +-- 
                                +-- `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
                                +-- Op creation.
                                +-- 
                                +-- *NOTE*: `Substr` supports broadcasting up to two dimensions. More about
                                +-- broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +-- 
                                +-- ---
                                +-- 
                                +-- Examples
                                +-- 
                                +-- Using scalar `pos` and `len`:
                                +-- 
                                +-- ```python
                                +-- input = [b'Hello', b'World']
                                +-- position = 1
                                +-- length = 3
                                +-- 
                                +-- output = [b'ell', b'orl']
                                +-- ```
                                +-- 
                                +-- Using `pos` and `len` with same shape as `input`:
                                +-- 
                                +-- ```python
                                +-- input = [[b'ten', b'eleven', b'twelve'],
                                +--          [b'thirteen', b'fourteen', b'fifteen'],
                                +--          [b'sixteen', b'seventeen', b'eighteen']]
                                +-- position = [[1, 2, 3],
                                +--             [1, 2, 3],
                                +--             [1, 2, 3]]
                                +-- length =   [[2, 3, 4],
                                +--             [4, 3, 2],
                                +--             [5, 5, 5]]
                                +-- 
                                +-- output = [[b'en', b'eve', b'lve'],
                                +--           [b'hirt', b'urt', b'te'],
                                +--           [b'ixtee', b'vente', b'hteen']]
                                +-- ```
                                +-- 
                                +-- Broadcasting `pos` and `len` onto `input`:
                                +-- 
                                +-- ```
                                +-- input = [[b'ten', b'eleven', b'twelve'],
                                +--          [b'thirteen', b'fourteen', b'fifteen'],
                                +--          [b'sixteen', b'seventeen', b'eighteen'],
                                +--          [b'nineteen', b'twenty', b'twentyone']]
                                +-- position = [1, 2, 3]
                                +-- length =   [1, 2, 3]
                                +-- 
                                +-- output = [[b'e', b'ev', b'lve'],
                                +--           [b'h', b'ur', b'tee'],
                                +--           [b'i', b've', b'hte'],
                                +--           [b'i', b'en', b'nty']]
                                +-- ```
                                +-- 
                                +-- Broadcasting `input` onto `pos` and `len`:
                                +-- 
                                +-- ```
                                +-- input = b'thirteen'
                                +-- position = [1, 5, 7]
                                +-- length =   [3, 2, 1]
                                +-- 
                                 +-- output = [b'hir', b'ee', b'n']
                                +-- ```
                                +substr :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => 
                                +          Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Tensor of strings
                                +          -> Tensor v'2 t -- ^ __pos__: Scalar defining the position of first character in each substring
                                +          -> Tensor v'3 t -- ^ __len__: Scalar defining the number of characters to include in each substring
                                +          -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Tensor of substrings
                                +substr = substr' id
                                +substr' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) =>
                                +           OpParams ->
                                +           Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Tensor of strings
                                +           -> Tensor v'2 t -- ^ __pos__: Scalar defining the position of first character in each substring
                                +           -> Tensor v'3 t -- ^ __len__: Scalar defining the number of characters to include in each substring
                                +           -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Tensor of substrings
                                +substr' op'options input pos len | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs pos,
                                +                                                             buildInputs len]
                                +        return (opDef "Substr"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "Tensor of strings" type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "pos"
                                +  description: "Scalar defining the position of first character in each substring"
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "len"
                                +  description: "Scalar defining the number of characters to include in each substring"
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output" description: "Tensor of substrings" type: DT_STRING
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the sum of elements across dimensions of a tensor.
                                +--
                                +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
                                +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
                                +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
                                +-- retained with length 1.
                                +sum :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Int.Int16, Data.Int.Int32,
                                +                                        Data.Int.Int64, Data.Int.Int8,
                                +                                        Data.Word.Word16, Data.Word.Word8,
                                +                                        Double, Float] t,
                                +                                OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
                                +       
                                +       Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +       -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +       -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +sum = sum' id
                                +sum' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Int.Int16, Data.Int.Int32,
                                +                                         Data.Int.Int64, Data.Int.Int8,
                                +                                         Data.Word.Word16, Data.Word.Word8,
                                +                                         Double, Float] t,
                                +                                 OneOf '[Data.Int.Int32,
                                +                                         Data.Int.Int64] tidx) => OpParams ->
                                +        Tensor v'1 t -- ^ __input__: The tensor to reduce.
                                +        -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
                                +        -> Tensor Build t -- ^ __output__: The reduced tensor.
                                +sum' op'options input reduction_indices | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs reduction_indices]
                                +        return (opDef "Sum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tidx" .~ tensorType (undefined :: tidx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The tensor to reduce." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "reduction_indices"
                                +  description: "The dimensions to reduce."
                                +  type_attr: "Tidx"
                                +}
                                +output_arg {
                                +  name: "output" description: "The reduced tensor." type_attr: "T"
                                +}
                                +attr {
                                +  name: "keep_dims"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, retain reduced dimensions with length 1."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tidx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the singular value decompositions of one or more matrices.
                                +--
                                +-- Computes the SVD of each inner matrix in `input` such that
                                +-- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
                                +-- 
                                +-- ```python
                                +-- # a is a tensor containing a batch of matrices.
                                +-- # s is a tensor of singular values for each matrix.
                                +-- # u is the tensor containing of left singular vectors for each matrix.
                                +-- # v is the tensor containing of right singular vectors for each matrix.
                                +-- s, u, v = svd(a)
                                +-- s, _, _ = svd(a, compute_uv=False)
                                +-- ```
                                +svd :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Double,
                                +                               Float] t) => 
                                +       Tensor v'1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
                                +                    -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
                                +       -> (Tensor Build t, Tensor Build t, Tensor Build t)
                                +       -- ^ (__s__, __u__, __v__)
                                +       --
                                +       -- * __s__: Singular values. Shape is `[..., P]`.
                                +       --
                                +       -- * __u__: Left singular vectors. If `full_matrices` is `False` then shape is
                                +       -- `[..., M, P]`; if `full_matrices` is `True` then shape is
                                +       -- `[..., M, M]`. Undefined if `compute_uv` is `False`.
                                +       --
                                 +       -- * __v__: Right singular vectors. If `full_matrices` is `False` then shape is
                                +       -- `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
                                +       -- Undefined if `compute_uv` is false.
                                +svd = svd' id
                                +svd' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Double,
                                +                                Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
                                +                     -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
                                +        -> (Tensor Build t, Tensor Build t, Tensor Build t)
                                +        -- ^ (__s__, __u__, __v__)
                                +        --
                                +        -- * __s__: Singular values. Shape is `[..., P]`.
                                +        --
                                +        -- * __u__: Left singular vectors. If `full_matrices` is `False` then shape is
                                +        -- `[..., M, P]`; if `full_matrices` is `True` then shape is
                                +        -- `[..., M, M]`. Undefined if `compute_uv` is `False`.
                                +        --
                                 +        -- * __v__: Right singular vectors. If `full_matrices` is `False` then shape is
                                +        -- `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
                                +        -- Undefined if `compute_uv` is false.
                                +svd' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Svd"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "s"
                                +  description: "Singular values. Shape is `[..., P]`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "u"
                                +  description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`. Undefined if `compute_uv` is `False`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "v"
                                 +  description: "Right singular vectors. If `full_matrices` is `False` then shape is\n`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.\nUndefined if `compute_uv` is false."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "compute_uv"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If true, left and right singular vectors will be\ncomputed and returned in `u` and `v`, respectively.\nIf false, `u` and `v` are not set and should never referenced."
                                +}
                                +attr {
                                +  name: "full_matrices"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If true, compute full-sized `u` and `v`. If false\n(the default), compute only the leading `P` singular vectors.\nIgnored if `compute_uv` is `False`."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_DOUBLE
                                +      type: DT_FLOAT
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Forwards `data` to the output port determined by `pred`.
                                +--
                                +-- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
                                +-- the data goes to `output_false`.
                                +-- 
                                +-- See also `RefSwitch` and `Merge`.
                                +switch :: forall v'1 v'2 t . (TensorType t) => 
                                +          Tensor v'1 t -- ^ __data__: The tensor to be forwarded to the appropriate output.
                                +          -> Tensor v'2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data.
                                +          -> (Tensor Build t, Tensor Build t)
                                +          -- ^ (__output_false__, __output_true__)
                                +          --
                                +          -- * __output_false__: If `pred` is false, data will be forwarded to this output.
                                +          --
                                +          -- * __output_true__: If `pred` is true, data will be forwarded to this output.
                                +switch = switch' id
                                +switch' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +           Tensor v'1 t -- ^ __data__: The tensor to be forwarded to the appropriate output.
                                +           -> Tensor v'2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data.
                                +           -> (Tensor Build t, Tensor Build t)
                                +           -- ^ (__output_false__, __output_true__)
                                +           --
                                +           -- * __output_false__: If `pred` is false, data will be forwarded to this output.
                                +           --
                                +           -- * __output_true__: If `pred` is true, data will be forwarded to this output.
                                +switch' op'options data' pred | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs pred]
                                +        return (opDef "Switch"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "data"
                                +  description: "The tensor to be forwarded to the appropriate output."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "pred"
                                +  description: "A scalar that specifies which output port will receive data."
                                +  type: DT_BOOL
                                +}
                                +output_arg {
                                +  name: "output_false"
                                +  description: "If `pred` is false, data will be forwarded to this output."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output_true"
                                +  description: "If `pred` is true, data will be forwarded to this output."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Creates a dataset that emits the records from one or more TFRecord files.
                                +
                                +tFRecordDataset :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                   Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__: A scalar or vector containing the name(s) of the file(s) to be
                                +                                                         -- read.
                                +                   -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__: A scalar containing either (i) the empty string (no
                                +                                                            -- compression), (ii) "ZLIB", or (iii) "GZIP".
                                +                   -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +tFRecordDataset = tFRecordDataset' id
                                +tFRecordDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                    Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__: A scalar or vector containing the name(s) of the file(s) to be
                                +                                                          -- read.
                                +                    -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__: A scalar containing either (i) the empty string (no
                                +                                                             -- compression), (ii) "ZLIB", or (iii) "GZIP".
                                +                    -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +tFRecordDataset' op'options filenames compression_type | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames,
                                +                                                             buildInputs compression_type]
                                +        buildOp [] (opDef "TFRecordDataset"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "filenames"
                                +  description: "A scalar or vector containing the name(s) of the file(s) to be\nread."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "compression_type"
                                +  description: "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\"."
                                +  type: DT_STRING
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +-}
                                +
                                +-- | A Reader that outputs the records from a TensorFlow Records file.
                                +
                                +tFRecordReader :: forall m' . (MonadBuild m') => 
                                +                  m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +tFRecordReader = tFRecordReader' id
                                +tFRecordReader' :: forall m' . (MonadBuild m') => OpParams ->
                                +                   m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +tFRecordReader' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "TFRecordReader"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +attr {
                                +  name: "compression_type" type: "string" default_value { s: "" }
                                +}
                                +-}
                                +
                                +-- | A Reader that outputs the records from a TensorFlow Records file.
                                +
                                +tFRecordReaderV2 :: forall m' . (MonadBuild m') => 
                                +                    m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +tFRecordReaderV2 = tFRecordReaderV2' id
                                +tFRecordReaderV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                     m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +tFRecordReaderV2' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "TFRecordReaderV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +attr {
                                +  name: "compression_type" type: "string" default_value { s: "" }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that contains `count` elements from the `input_dataset`.
                                +
                                +takeDataset :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +               [DataType] -- ^ __output_types__
                                +               -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +               -> Tensor v'2 Data.Int.Int64 -- ^ __count__: A scalar representing the number of elements from the `input_dataset`
                                +                                            -- that should be taken. A value of `-1` indicates that all of `input_dataset`
                                +                                            -- is taken.
                                +               -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +takeDataset = takeDataset' id
                                +takeDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                [DataType] -- ^ __output_types__
                                +                -> Tensor v'1 ResourceHandle -- ^ __input_dataset__
                                +                -> Tensor v'2 Data.Int.Int64 -- ^ __count__: A scalar representing the number of elements from the `input_dataset`
                                +                                             -- that should be taken. A value of `-1` indicates that all of `input_dataset`
                                +                                             -- is taken.
                                +                -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +takeDataset' op'options output_types input_dataset count | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset,
                                +                                                             buildInputs count]
                                +        buildOp [] (opDef "TakeDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input_dataset" type: DT_RESOURCE }
                                +input_arg {
                                +  name: "count"
                                +  description: "A scalar representing the number of elements from the `input_dataset`\nthat should be taken. A value of `-1` indicates that all of `input_dataset`\nis taken."
                                +  type: DT_INT64
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
                                +--
                                +-- The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
                                +-- `N` is the minibatch size and the rows correspond to the output handles of
                                +-- `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the
                                +-- original `SparseTensor` objects that went into the given input ops must all
                                +-- match.  When the final `SparseTensor` is created, it has rank one
                                +-- higher than the ranks of the incoming `SparseTensor` objects
                                +-- (they have been concatenated along a new row dimension on the left).
                                +-- 
                                +-- The output `SparseTensor` object's shape values for all dimensions but the
                                +-- first are the max across the input `SparseTensor` objects' shape values
                                +-- for the corresponding dimensions.  Its first shape value is `N`, the minibatch
                                +-- size.
                                +-- 
                                +-- The input `SparseTensor` objects' indices are assumed ordered in
                                +-- standard lexicographic order.  If this is not the case, after this
                                +-- step run `SparseReorder` to restore index ordering.
                                +-- 
                                +-- For example, if the handles represent an input, which is a `[2, 3]` matrix
                                +-- representing two original `SparseTensor` objects:
                                +-- 
                                +-- ```
                                +--     index = [ 0]
                                +--             [10]
                                +--             [20]
                                +--     values = [1, 2, 3]
                                +--     shape = [50]
                                +-- ```
                                +-- 
                                +-- and
                                +-- 
                                +-- ```
                                +--     index = [ 2]
                                +--             [10]
                                +--     values = [4, 5]
                                +--     shape = [30]
                                +-- ```
                                +-- 
                                +-- then the final `SparseTensor` will be:
                                +-- 
                                +-- ```
                                +--     index = [0  0]
                                +--             [0 10]
                                +--             [0 20]
                                +--             [1  2]
                                +--             [1 10]
                                +--     values = [1, 2, 3, 4, 5]
                                +--     shape = [2 50]
                                +-- ```
                                +takeManySparseFromTensorsMap :: forall v'1 dtype m' . (MonadBuild m',
                                +                                                       TensorType dtype) => 
                                +                                Tensor v'1 Data.Int.Int64 -- ^ __sparse_handles__: 1-D, The `N` serialized `SparseTensor` objects.
                                +                                                          -- Shape: `[N]`.
                                +                                -> m' ((Tensor Value Data.Int.Int64,
                                +                                        Tensor Value dtype,
                                +                                        Tensor Value Data.Int.Int64))
                                +                                -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)
                                +                                --
                                +                                -- * __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
                                +                                --
                                +                                -- * __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
                                +                                --
                                +                                -- * __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
                                +takeManySparseFromTensorsMap = takeManySparseFromTensorsMap' id
                                +takeManySparseFromTensorsMap' :: forall v'1 dtype m' . (MonadBuild m',
                                +                                                        TensorType dtype) =>
                                +                                 OpParams ->
                                +                                 Tensor v'1 Data.Int.Int64 -- ^ __sparse_handles__: 1-D, The `N` serialized `SparseTensor` objects.
                                +                                                           -- Shape: `[N]`.
                                +                                 -> m' ((Tensor Value Data.Int.Int64,
                                +                                         Tensor Value dtype,
                                +                                         Tensor Value Data.Int.Int64))
                                +                                 -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)
                                +                                 --
                                +                                 -- * __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
                                +                                 --
                                +                                 -- * __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
                                +                                 --
                                +                                 -- * __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
                                +takeManySparseFromTensorsMap' op'options sparse_handles | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_handles]
                                +        buildOp [] (opDef "TakeManySparseFromTensorsMap"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "sparse_handles"
                                +  description: "1-D, The `N` serialized `SparseTensor` objects.\nShape: `[N]`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sparse_indices"
                                +  description: "2-D.  The `indices` of the minibatch `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sparse_values"
                                +  description: "1-D.  The `values` of the minibatch `SparseTensor`."
                                +  type_attr: "dtype"
                                +}
                                +output_arg {
                                +  name: "sparse_shape"
                                +  description: "1-D.  The `shape` of the minibatch `SparseTensor`."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The `dtype` of the `SparseTensor` objects stored in the\n`SparseTensorsMap`."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The container name for the `SparseTensorsMap` read by this op."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "The shared name for the `SparseTensorsMap` read by this op.\nIt should not be blank; rather the `shared_name` or unique Operation name\nof the Op that created the original `SparseTensorsMap` should be used."
                                +}
                                +-}
                                +
                                +-- | Computes tan of x element-wise.
                                +
                                +tan :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                               (Data.Complex.Complex Float), Data.Int.Int32,
                                +                               Data.Int.Int64, Data.Word.Word16, Double,
                                +                               Float] t) => 
                                +       Tensor v'1 t -- ^ __x__
                                +       -> Tensor Build t -- ^ __y__
                                +tan = tan' id
                                +tan' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Int.Int32,
                                +                                Data.Int.Int64, Data.Word.Word16, Double,
                                +                                Float] t) => OpParams ->
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +tan' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Tan"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes hyperbolic tangent of `x` element-wise.
                                +
                                +tanh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor Build t -- ^ __y__
                                +tanh = tanh' id
                                +tanh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                 (Data.Complex.Complex Float), Data.Word.Word16,
                                +                                 Double, Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor Build t -- ^ __y__
                                +tanh' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Tanh"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Computes the gradient for the tanh of `x` wrt its input.
                                +--
                                +-- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
                                +-- is the corresponding input gradient.
                                +tanhGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                        (Data.Complex.Complex Float),
                                +                                        Data.Word.Word16, Double, Float] t) => 
                                +            Tensor v'1 t -- ^ __x__
                                +            -> Tensor v'2 t -- ^ __y__
                                +            -> Tensor Build t -- ^ __z__
                                +tanhGrad = tanhGrad' id
                                +tanhGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                         (Data.Complex.Complex Float),
                                +                                         Data.Word.Word16, Double, Float] t) =>
                                +             OpParams ->
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 t -- ^ __y__
                                +             -> Tensor Build t -- ^ __z__
                                +tanhGrad' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "TanhGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns a tensor that may be mutated, but only persists within a single step.
                                +--
                                +-- This is an experimental op for internal use only and it is possible to use this
                                +-- op in unsafe ways.  DO NOT USE unless you fully understand the risks.
                                +-- 
                                +-- It is the caller's responsibility to ensure that 'ref' is eventually passed to a
                                +-- matching 'DestroyTemporaryVariable' op after all other uses have completed.
                                +-- 
                                +-- Outputs a ref to the tensor state so it may be read or modified.
                                +-- 
                                +--   E.g.
                                +--       var = state_ops._temporary_variable([1, 2], types.float_)
                                +--       var_name = var.op.name
                                +--       var = state_ops.assign(var, [[4.0, 5.0]])
                                +--       var = state_ops.assign_add(var, [[6.0, 7.0]])
                                +--       final = state_ops._destroy_temporary_variable(var, var_name=var_name)
                                +temporaryVariable :: forall dtype m' . (MonadBuild m', TensorType dtype) => 
                                +                     Shape -- ^ __shape__: The shape of the variable tensor.
                                +                     -> m' (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor.
                                +temporaryVariable = temporaryVariable' id
                                +temporaryVariable' :: forall dtype m' . (MonadBuild m', TensorType dtype) =>
                                +                      OpParams ->
                                +                      Shape -- ^ __shape__: The shape of the variable tensor.
                                +                      -> m' (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor.
                                +temporaryVariable' op'options shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "TemporaryVariable"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "shape" .~ shape
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "ref"
                                +  description: "A reference to the variable tensor."
                                +  type_attr: "dtype"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "The shape of the variable tensor."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of elements in the variable tensor."
                                +}
                                +attr {
                                +  name: "var_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "Overrides the name used for the temporary variable resource. Default\nvalue is the name of the \'TemporaryVariable\' op (which is guaranteed unique)."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArray :: forall v'1 m' . (MonadBuild m') => 
                                +               DataType -- ^ __dtype__
                                +               -> Tensor v'1 Data.Int.Int32 -- ^ __size__
                                +               -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__
                                +tensorArray = tensorArray' id
                                +tensorArray' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                DataType -- ^ __dtype__
                                +                -> Tensor v'1 Data.Int.Int32 -- ^ __size__
                                +                -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__
                                +tensorArray' op'options dtype size | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size]
                                +        buildOp [] (opDef "TensorArray"
                                +                    & opAttr "dtype" .~ dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "size" type: DT_INT32 }
                                +output_arg { name: "handle" type: DT_STRING is_ref: true }
                                +attr { name: "dtype" type: "type" }
                                +attr {
                                +  name: "dynamic_size" type: "bool" default_value { b: false }
                                +}
                                +attr {
                                +  name: "clear_after_read" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "tensor_array_name" type: "string" default_value { s: "" }
                                +}
                                +attr {
                                +  name: "element_shape"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayClose :: forall m' . (MonadBuild m') => 
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                    -> m' (ControlNode)
                                +tensorArrayClose = tensorArrayClose' id
                                +tensorArrayClose' :: forall m' . (MonadBuild m') => OpParams ->
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                     -> m' (ControlNode)
                                +tensorArrayClose' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "TensorArrayClose"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +-}
                                +
                                +-- | Deprecated. Use TensorArrayCloseV3
                                +
                                +tensorArrayCloseV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                      Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                      -> m' (ControlNode)
                                +tensorArrayCloseV2 = tensorArrayCloseV2' id
                                +tensorArrayCloseV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                       Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                       -> m' (ControlNode)
                                +tensorArrayCloseV2' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "TensorArrayCloseV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +-}
                                +
                                +-- | Delete the TensorArray from its resource container.
                                +--
                                +-- This enables the user to close and release the resource in the middle
                                +-- of a step/run.
                                +tensorArrayCloseV3 :: forall v'1 m' . (MonadBuild m') => 
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
                                +                      -> m' (ControlNode)
                                +tensorArrayCloseV3 = tensorArrayCloseV3' id
                                +tensorArrayCloseV3' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
                                +                       -> m' (ControlNode)
                                +tensorArrayCloseV3' op'options handle | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
                                +        buildOp [] (opDef "TensorArrayCloseV3"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)."
                                +  type: DT_RESOURCE
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayConcat :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) => 
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                     -> Tensor v'2 Float -- ^ __flow_in__
                                +                     -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64))
                                +                     -- ^ (__value__, __lengths__)
                                +                     --
                                +                     -- * __value__
                                +                     --
                                +                     -- * __lengths__
                                +tensorArrayConcat = tensorArrayConcat' id
                                +tensorArrayConcat' :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) =>
                                +                      OpParams ->
                                +                      Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 Float -- ^ __flow_in__
                                +                      -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64))
                                +                      -- ^ (__value__, __lengths__)
                                +                      --
                                +                      -- * __value__
                                +                      --
                                +                      -- * __lengths__
                                +tensorArrayConcat' op'options handle flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayConcat"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "value" type_attr: "dtype" }
                                +output_arg { name: "lengths" type: DT_INT64 }
                                +attr { name: "dtype" type: "type" }
                                +attr {
                                +  name: "element_shape_except0"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +}
                                +-}
                                +
                                +-- | Deprecated. Use TensorArrayConcatV3
                                +
                                +tensorArrayConcatV2 :: forall v'1 v'2 dtype . (TensorType dtype) => 
                                +                       Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                       -> Tensor v'2 Float -- ^ __flow_in__
                                +                       -> (Tensor Build dtype, Tensor Build Data.Int.Int64)
                                +                       -- ^ (__value__, __lengths__)
                                +                       --
                                +                       -- * __value__
                                +                       --
                                +                       -- * __lengths__
                                +tensorArrayConcatV2 = tensorArrayConcatV2' id
                                +tensorArrayConcatV2' :: forall v'1 v'2 dtype . (TensorType dtype) => OpParams ->
                                +                        Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                        -> Tensor v'2 Float -- ^ __flow_in__
                                +                        -> (Tensor Build dtype, Tensor Build Data.Int.Int64)
                                +                        -- ^ (__value__, __lengths__)
                                +                        --
                                +                        -- * __value__
                                +                        --
                                +                        -- * __lengths__
                                +tensorArrayConcatV2' op'options handle flow_in | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        return (opDef "TensorArrayConcatV2"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "value" type_attr: "dtype" }
                                +output_arg { name: "lengths" type: DT_INT64 }
                                +attr { name: "dtype" type: "type" }
                                +attr {
                                +  name: "element_shape_except0"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +}
                                +-}
                                +
                                +-- | Concat the elements from the TensorArray into value `value`.
                                +--
                                +-- Takes `T` elements of shapes
                                +-- 
                                +--   ```
                                +--   (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
                                +--   ```
                                +-- 
                                +-- and concatenates them into a Tensor of shape:
                                +-- 
                                +--   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
                                +-- 
                                +-- All elements must have the same shape (excepting the first dimension).
                                +tensorArrayConcatV3 :: forall v'1 v'2 dtype m' . (MonadBuild m',
                                +                                                  TensorType dtype) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                       -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                       -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64))
                                +                       -- ^ (__value__, __lengths__)
                                +                       --
                                +                       -- * __value__: All of the elements in the TensorArray, concatenated along the first
                                +                       -- axis.
                                +                       --
                                +                       -- * __lengths__: A vector of the row sizes of the original T elements in the
                                +                       -- value output.  In the example above, this would be the values:
                                +                       -- `(n1, n2, ..., n(T-1))`.
                                +tensorArrayConcatV3 = tensorArrayConcatV3' id
                                +tensorArrayConcatV3' :: forall v'1 v'2 dtype m' . (MonadBuild m',
                                +                                                   TensorType dtype) =>
                                +                        OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                        -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                        -> m' ((Tensor Value dtype,
                                +                                Tensor Value Data.Int.Int64))
                                +                        -- ^ (__value__, __lengths__)
                                +                        --
                                +                        -- * __value__: All of the elements in the TensorArray, concatenated along the first
                                +                        -- axis.
                                +                        --
                                +                        -- * __lengths__: A vector of the row sizes of the original T elements in the
                                +                        -- value output.  In the example above, this would be the values:
                                +                        -- `(n1, n2, ..., n(T-1))`.
                                +tensorArrayConcatV3' op'options handle flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayConcatV3"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a TensorArray."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "flow_in"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "value"
                                +  description: "All of the elements in the TensorArray, concatenated along the first\naxis."
                                +  type_attr: "dtype"
                                +}
                                +output_arg {
                                +  name: "lengths"
                                +  description: "A vector of the row sizes of the original T elements in the\nvalue output.  In the example above, this would be the values:\n`(n1, n2, ..., n(T-1))`."
                                +  type: DT_INT64
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the elem that is returned."
                                +}
                                +attr {
                                +  name: "element_shape_except0"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +  description: "The expected shape of an element, if known,\nexcluding the first dimension. Used to validate the shapes of\nTensorArray elements. If this shape is not fully specified, concatenating\nzero-size TensorArrays is an error."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayGather :: forall v'2 v'3 dtype m' . (MonadBuild m',
                                +                                                TensorType dtype) => 
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                     -> Tensor v'3 Float -- ^ __flow_in__
                                +                     -> m' (Tensor Value dtype) -- ^ __value__
                                +tensorArrayGather = tensorArrayGather' id
                                +tensorArrayGather' :: forall v'2 v'3 dtype m' . (MonadBuild m',
                                +                                                 TensorType dtype) =>
                                +                      OpParams ->
                                +                      Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                      -> Tensor v'3 Float -- ^ __flow_in__
                                +                      -> m' (Tensor Value dtype) -- ^ __value__
                                +tensorArrayGather' op'options handle indices flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayGather"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "value" type_attr: "dtype" }
                                +attr { name: "dtype" type: "type" }
                                +attr {
                                +  name: "element_shape"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +}
                                +-}
                                +
                                +-- | Deprecated. Use TensorArrayGatherV3
                                +
                                +tensorArrayGatherV2 :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => 
                                +                       Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                       -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                       -> Tensor v'3 Float -- ^ __flow_in__
                                +                       -> Tensor Build dtype -- ^ __value__
                                +tensorArrayGatherV2 = tensorArrayGatherV2' id
                                +tensorArrayGatherV2' :: forall v'1 v'2 v'3 dtype . (TensorType dtype) =>
                                +                        OpParams ->
                                +                        Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                        -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                        -> Tensor v'3 Float -- ^ __flow_in__
                                +                        -> Tensor Build dtype -- ^ __value__
                                +tensorArrayGatherV2' op'options handle indices flow_in | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs flow_in]
                                +        return (opDef "TensorArrayGatherV2"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "value" type_attr: "dtype" }
                                +attr { name: "dtype" type: "type" }
                                +attr {
                                +  name: "element_shape"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +}
                                +-}
                                +
                                +-- | Gather specific elements from the TensorArray into output `value`.
                                +--
                                +-- All elements selected by `indices` must have the same shape.
                                +tensorArrayGatherV3 :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m',
                                +                                                      TensorType dtype) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                       -> Tensor v'2 Data.Int.Int32 -- ^ __indices__: The locations in the TensorArray from which to read tensor elements.
                                +                       -> Tensor v'3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                       -> m' (Tensor Value dtype) -- ^ __value__: All of the elements in the TensorArray, concatenated along a new
                                +                       -- axis (the new dimension 0).
                                +tensorArrayGatherV3 = tensorArrayGatherV3' id
                                +tensorArrayGatherV3' :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m',
                                +                                                       TensorType dtype) =>
                                +                        OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                        -> Tensor v'2 Data.Int.Int32 -- ^ __indices__: The locations in the TensorArray from which to read tensor elements.
                                +                        -> Tensor v'3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                        -> m' (Tensor Value dtype) -- ^ __value__: All of the elements in the TensorArray, concatenated along a new
                                +                        -- axis (the new dimension 0).
                                +tensorArrayGatherV3' op'options handle indices flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayGatherV3"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a TensorArray."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "The locations in the TensorArray from which to read tensor elements."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "flow_in"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "value"
                                +  description: "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0)."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the elem that is returned."
                                +}
                                +attr {
                                +  name: "element_shape"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +  description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayGrad :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                   Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                   -> Tensor v'2 Float -- ^ __flow_in__
                                +                   -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __grad_handle__
                                +tensorArrayGrad = tensorArrayGrad' id
                                +tensorArrayGrad' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                    Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                    -> Tensor v'2 Float -- ^ __flow_in__
                                +                    -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __grad_handle__
                                +tensorArrayGrad' op'options handle flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayGrad"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "grad_handle" type: DT_STRING is_ref: true }
                                +attr { name: "source" type: "string" }
                                +-}
                                +
                                +-- | Deprecated. Use TensorArrayGradV3
                                +
                                +tensorArrayGradV2 :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                     Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                     -> Tensor v'2 Float -- ^ __flow_in__
                                +                     -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __grad_handle__
                                +tensorArrayGradV2 = tensorArrayGradV2' id
                                +tensorArrayGradV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                      Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 Float -- ^ __flow_in__
                                +                      -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __grad_handle__
                                +tensorArrayGradV2' op'options handle flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayGradV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "grad_handle" type: DT_STRING }
                                +attr { name: "source" type: "string" }
                                +-}
                                +
                                +-- | Creates a TensorArray for storing the gradients of values in the given handle.
                                +--
                                +-- If the given TensorArray gradient already exists, returns a reference to it.
                                +-- 
                                +-- Locks the size of the original TensorArray by disabling its dynamic size flag.
                                +-- 
                                +-- **A note about the input flow_in:**
                                +-- 
                                +-- The handle flow_in forces the execution of the gradient lookup to occur
                                +-- only after certain other operations have occurred.  For example, when
                                +-- the forward TensorArray is dynamically sized, writes to this TensorArray
                                +-- may resize the object.  The gradient TensorArray is statically sized based
                                +-- on the size of the forward TensorArray when this operation executes.
                                +-- Furthermore, the size of the forward TensorArray is frozen by this call.
                                +-- As a result, the flow is used to ensure that the call to generate the gradient
                                +-- TensorArray only happens after all writes are executed.
                                +-- 
                                +-- In the case of dynamically sized TensorArrays, gradient computation should
                                +-- only be performed on read operations that have themselves been chained via
                                +-- flow to occur only after all writes have executed. That way the final size
                                +-- of the forward TensorArray is known when this operation is called.
                                +-- 
                                +-- **A note about the source attribute:**
                                +-- 
                                +-- TensorArray gradient calls use an accumulator TensorArray object.  If
                                +-- multiple gradients are calculated and run in the same session, the multiple
                                +-- gradient nodes may accidentally flow through the same accumulator TensorArray.
                                +-- This double counts and generally breaks the TensorArray gradient flow.
                                +-- 
                                +-- The solution is to identify which gradient call this particular
                                +-- TensorArray gradient is being called in.  This is performed by identifying
                                +-- a unique string (e.g. "gradients", "gradients_1", ...) from the input
                                +-- gradient Tensor's name.  This string is used as a suffix when creating
                                +-- the TensorArray gradient object here (the attribute `source`).
                                +-- 
                                +-- The attribute `source` is added as a suffix to the forward TensorArray's
                                +-- name when performing the creation / lookup, so that each separate gradient
                                +-- calculation gets its own TensorArray accumulator.
                                +tensorArrayGradV3 :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                     Tensor v'1 ResourceHandle -- ^ __handle__: The handle to the forward TensorArray.
                                +                     -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                     -> m' ((Tensor Value ResourceHandle, Tensor Value Float))
                                +                     -- ^ (__grad_handle__, __flow_out__)
                                +                     --
                                +                     -- * __grad_handle__
                                +                     --
                                +                     -- * __flow_out__
                                +tensorArrayGradV3 = tensorArrayGradV3' id
                                +tensorArrayGradV3' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to the forward TensorArray.
                                +                      -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                      -> m' ((Tensor Value ResourceHandle, Tensor Value Float))
                                +                      -- ^ (__grad_handle__, __flow_out__)
                                +                      --
                                +                      -- * __grad_handle__
                                +                      --
                                +                      -- * __flow_out__
                                +tensorArrayGradV3' op'options handle flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayGradV3"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to the forward TensorArray."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "flow_in"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +output_arg { name: "grad_handle" type: DT_RESOURCE }
                                +output_arg { name: "flow_out" type: DT_FLOAT }
                                +attr {
                                +  name: "source"
                                +  type: "string"
                                +  description: "The gradient source string, used to decide which gradient TensorArray\nto return."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayPack :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) => 
                                +                   Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                   -> Tensor v'2 Float -- ^ __flow_in__
                                +                   -> m' (Tensor Value dtype) -- ^ __value__
                                +tensorArrayPack = tensorArrayPack' id
                                +tensorArrayPack' :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) =>
                                +                    OpParams ->
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                    -> Tensor v'2 Float -- ^ __flow_in__
                                +                    -> m' (Tensor Value dtype) -- ^ __value__
                                +tensorArrayPack' op'options handle flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayPack"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "value" type_attr: "dtype" }
                                +attr { name: "dtype" type: "type" }
                                +attr {
                                +  name: "element_shape"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayRead :: forall v'2 v'3 dtype m' . (MonadBuild m',
                                +                                              TensorType dtype) => 
                                +                   Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                   -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                   -> Tensor v'3 Float -- ^ __flow_in__
                                +                   -> m' (Tensor Value dtype) -- ^ __value__
                                +tensorArrayRead = tensorArrayRead' id
                                +tensorArrayRead' :: forall v'2 v'3 dtype m' . (MonadBuild m',
                                +                                               TensorType dtype) => OpParams ->
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                    -> Tensor v'3 Float -- ^ __flow_in__
                                +                    -> m' (Tensor Value dtype) -- ^ __value__
                                +tensorArrayRead' op'options handle index flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs index,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayRead"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "index" type: DT_INT32 }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "value" type_attr: "dtype" }
                                +attr { name: "dtype" type: "type" }
                                +-}
                                +
                                +-- | Deprecated. Use TensorArrayReadV3
                                +
                                +tensorArrayReadV2 :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => 
                                +                     Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                     -> Tensor v'3 Float -- ^ __flow_in__
                                +                     -> Tensor Build dtype -- ^ __value__
                                +tensorArrayReadV2 = tensorArrayReadV2' id
                                +tensorArrayReadV2' :: forall v'1 v'2 v'3 dtype . (TensorType dtype) =>
                                +                      OpParams ->
                                +                      Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                      -> Tensor v'3 Float -- ^ __flow_in__
                                +                      -> Tensor Build dtype -- ^ __value__
                                +tensorArrayReadV2' op'options handle index flow_in | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs index,
                                +                                                             buildInputs flow_in]
                                +        return (opDef "TensorArrayReadV2"
                                +                & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "index" type: DT_INT32 }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "value" type_attr: "dtype" }
                                +attr { name: "dtype" type: "type" }
                                +-}
                                +
                                +-- | Read an element from the TensorArray into output `value`.
                                +
                                +tensorArrayReadV3 :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m',
                                +                                                    TensorType dtype) => 
                                +                     Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                     -> Tensor v'3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                     -> m' (Tensor Value dtype) -- ^ __value__: The tensor that is read from the TensorArray.
                                +tensorArrayReadV3 = tensorArrayReadV3' id
                                +tensorArrayReadV3' :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m',
                                +                                                     TensorType dtype) =>
                                +                      OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                      -> Tensor v'3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                      -> m' (Tensor Value dtype) -- ^ __value__: The tensor that is read from the TensorArray.
                                +tensorArrayReadV3' op'options handle index flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs index,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayReadV3"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a TensorArray."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg { name: "index" type: DT_INT32 }
                                +input_arg {
                                +  name: "flow_in"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "value"
                                +  description: "The tensor that is read from the TensorArray."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the elem that is returned."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayScatter :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) =>
                                +                      
                                +                      Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                      -> Tensor v'3 t -- ^ __value__
                                +                      -> Tensor v'4 Float -- ^ __flow_in__
                                +                      -> m' (Tensor Value Float) -- ^ __flow_out__
                                +tensorArrayScatter = tensorArrayScatter' id
                                +tensorArrayScatter' :: forall v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                  TensorType t) => OpParams ->
                                +                       Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                       -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                       -> Tensor v'3 t -- ^ __value__
                                +                       -> Tensor v'4 Float -- ^ __flow_in__
                                +                       -> m' (Tensor Value Float) -- ^ __flow_out__
                                +tensorArrayScatter' op'options handle indices value flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs value,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayScatter"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +input_arg { name: "value" type_attr: "T" }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "flow_out" type: DT_FLOAT }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Deprecated. Use TensorArrayScatterV3
                                +
                                +tensorArrayScatterV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => 
                                +                        Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                        -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                        -> Tensor v'3 t -- ^ __value__
                                +                        -> Tensor v'4 Float -- ^ __flow_in__
                                +                        -> Tensor Build Float -- ^ __flow_out__
                                +tensorArrayScatterV2 = tensorArrayScatterV2' id
                                +tensorArrayScatterV2' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) =>
                                +                         OpParams ->
                                +                         Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                         -> Tensor v'2 Data.Int.Int32 -- ^ __indices__
                                +                         -> Tensor v'3 t -- ^ __value__
                                +                         -> Tensor v'4 Float -- ^ __flow_in__
                                +                         -> Tensor Build Float -- ^ __flow_out__
                                +tensorArrayScatterV2' op'options handle indices value
                                +                      flow_in | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs value,
                                +                                                             buildInputs flow_in]
                                +        return (opDef "TensorArrayScatterV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "indices" type: DT_INT32 }
                                +input_arg { name: "value" type_attr: "T" }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "flow_out" type: DT_FLOAT }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Scatter the data from the input value into specific TensorArray elements.
                                +--
                                +-- `indices` must be a vector, its length must match the first dim of `value`.
                                +tensorArrayScatterV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                       TensorType t) => 
                                +                        Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                        -> Tensor v'2 Data.Int.Int32 -- ^ __indices__: The locations at which to write the tensor elements.
                                +                        -> Tensor v'3 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
                                +                        -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                        -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
                                +tensorArrayScatterV3 = tensorArrayScatterV3' id
                                +tensorArrayScatterV3' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                        TensorType t) =>
                                +                         OpParams ->
                                +                         Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                         -> Tensor v'2 Data.Int.Int32 -- ^ __indices__: The locations at which to write the tensor elements.
                                +                         -> Tensor v'3 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
                                +                         -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                         -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
                                +tensorArrayScatterV3' op'options handle indices value
                                +                      flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs indices,
                                +                                                             buildInputs value,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayScatterV3"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a TensorArray."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "indices"
                                +  description: "The locations at which to write the tensor elements."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "The concatenated tensor to write to the TensorArray."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "flow_in"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "flow_out"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArraySize :: forall v'2 m' . (MonadBuild m') => 
                                +                   Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                   -> Tensor v'2 Float -- ^ __flow_in__
                                +                   -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +tensorArraySize = tensorArraySize' id
                                +tensorArraySize' :: forall v'2 m' . (MonadBuild m') => OpParams ->
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                    -> Tensor v'2 Float -- ^ __flow_in__
                                +                    -> m' (Tensor Value Data.Int.Int32) -- ^ __size__
                                +tensorArraySize' op'options handle flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArraySize"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "size" type: DT_INT32 }
                                +-}
                                +
                                +-- | Deprecated. Use TensorArraySizeV3
                                +
                                +tensorArraySizeV2 :: 
                                +                     Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                     -> Tensor v'2 Float -- ^ __flow_in__
                                +                     -> Tensor Build Data.Int.Int32 -- ^ __size__
                                +tensorArraySizeV2 = tensorArraySizeV2' id
                                +tensorArraySizeV2' :: OpParams ->
                                +                      Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 Float -- ^ __flow_in__
                                +                      -> Tensor Build Data.Int.Int32 -- ^ __size__
                                +tensorArraySizeV2' op'options handle flow_in | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        return (opDef "TensorArraySizeV2"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "size" type: DT_INT32 }
                                +-}
                                +
                                +-- | Get the current size of the TensorArray.
                                +
                                +tensorArraySizeV3 :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                     Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
                                +                     -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                     -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The current size of the TensorArray.
                                +tensorArraySizeV3 = tensorArraySizeV3' id
                                +tensorArraySizeV3' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
                                +                      -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                      -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The current size of the TensorArray.
                                +tensorArraySizeV3' op'options handle flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArraySizeV3"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "flow_in"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "size"
                                +  description: "The current size of the TensorArray."
                                +  type: DT_INT32
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArraySplit :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => 
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                    -> Tensor v'2 t -- ^ __value__
                                +                    -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__
                                +                    -> Tensor v'4 Float -- ^ __flow_in__
                                +                    -> m' (Tensor Value Float) -- ^ __flow_out__
                                +tensorArraySplit = tensorArraySplit' id
                                +tensorArraySplit' :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) =>
                                +                     OpParams ->
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                     -> Tensor v'2 t -- ^ __value__
                                +                     -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__
                                +                     -> Tensor v'4 Float -- ^ __flow_in__
                                +                     -> m' (Tensor Value Float) -- ^ __flow_out__
                                +tensorArraySplit' op'options handle value lengths flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs value,
                                +                                                             buildInputs lengths,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArraySplit"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "value" type_attr: "T" }
                                +input_arg { name: "lengths" type: DT_INT64 }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "flow_out" type: DT_FLOAT }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Deprecated. Use TensorArraySplitV3
                                +
                                +tensorArraySplitV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => 
                                +                      Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 t -- ^ __value__
                                +                      -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__
                                +                      -> Tensor v'4 Float -- ^ __flow_in__
                                +                      -> Tensor Build Float -- ^ __flow_out__
                                +tensorArraySplitV2 = tensorArraySplitV2' id
                                +tensorArraySplitV2' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams ->
                                +                       Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                       -> Tensor v'2 t -- ^ __value__
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__
                                +                       -> Tensor v'4 Float -- ^ __flow_in__
                                +                       -> Tensor Build Float -- ^ __flow_out__
                                +tensorArraySplitV2' op'options handle value lengths flow_in | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs value,
                                +                                                             buildInputs lengths,
                                +                                                             buildInputs flow_in]
                                +        return (opDef "TensorArraySplitV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "value" type_attr: "T" }
                                +input_arg { name: "lengths" type: DT_INT64 }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "flow_out" type: DT_FLOAT }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Split the data from the input value into TensorArray elements.
                                +--
                                +-- Assuming that `lengths` takes on values
                                +-- 
                                +--   ```(n0, n1, ..., n(T-1))```
                                +-- 
                                +-- and that `value` has shape
                                +-- 
                                +--   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
                                +-- 
                                +-- this splits values into a TensorArray with T tensors.
                                +-- 
                                +-- TensorArray index t will be the subtensor of values with starting position
                                +-- 
                                +--   ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
                                +-- 
                                +-- and having size
                                +-- 
                                +--   ```nt x d0 x d1 x ...```
                                +tensorArraySplitV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                     TensorType t) => 
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                      -> Tensor v'2 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
                                +                      -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__: The vector of lengths, how to split the rows of value into the
                                +                                                   -- TensorArray.
                                +                      -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                      -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
                                +tensorArraySplitV3 = tensorArraySplitV3' id
                                +tensorArraySplitV3' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                      TensorType t) =>
                                +                       OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                       -> Tensor v'2 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
                                +                       -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__: The vector of lengths, how to split the rows of value into the
                                +                                                    -- TensorArray.
                                +                       -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                       -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
                                +tensorArraySplitV3' op'options handle value lengths flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs value,
                                +                                                             buildInputs lengths,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArraySplitV3"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a TensorArray."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "The concatenated tensor to write to the TensorArray."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "lengths"
                                +  description: "The vector of lengths, how to split the rows of value into the\nTensorArray."
                                +  type: DT_INT64
                                +}
                                +input_arg {
                                +  name: "flow_in"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "flow_out"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayUnpack :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => 
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                     -> Tensor v'2 t -- ^ __value__
                                +                     -> Tensor v'3 Float -- ^ __flow_in__
                                +                     -> m' (Tensor Value Float) -- ^ __flow_out__
                                +tensorArrayUnpack = tensorArrayUnpack' id
                                +tensorArrayUnpack' :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) =>
                                +                      OpParams ->
                                +                      Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 t -- ^ __value__
                                +                      -> Tensor v'3 Float -- ^ __flow_in__
                                +                      -> m' (Tensor Value Float) -- ^ __flow_out__
                                +tensorArrayUnpack' op'options handle value flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs value,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayUnpack"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "value" type_attr: "T" }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "flow_out" type: DT_FLOAT }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Deprecated. Use TensorArrayV3
                                +
                                +tensorArrayV2 :: forall v'1 m' . (MonadBuild m') => 
                                +                 DataType -- ^ __dtype__
                                +                 -> Tensor v'1 Data.Int.Int32 -- ^ __size__
                                +                 -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __handle__
                                +tensorArrayV2 = tensorArrayV2' id
                                +tensorArrayV2' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                  DataType -- ^ __dtype__
                                +                  -> Tensor v'1 Data.Int.Int32 -- ^ __size__
                                +                  -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __handle__
                                +tensorArrayV2' op'options dtype size | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size]
                                +        buildOp [] (opDef "TensorArrayV2"
                                +                    & opAttr "dtype" .~ dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "size" type: DT_INT32 }
                                +output_arg { name: "handle" type: DT_STRING }
                                +attr { name: "dtype" type: "type" }
                                +attr {
                                +  name: "element_shape"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +}
                                +attr {
                                +  name: "dynamic_size" type: "bool" default_value { b: false }
                                +}
                                +attr {
                                +  name: "clear_after_read" type: "bool" default_value { b: true }
                                +}
                                +attr {
                                +  name: "tensor_array_name" type: "string" default_value { s: "" }
                                +}
                                +-}
                                +
                                +-- | An array of Tensors of given size.
                                +--
                                +-- Write data via Write and read via Read or Pack.
                                +tensorArrayV3 :: forall v'1 m' . (MonadBuild m') => 
                                +                 DataType -- ^ __dtype__: The type of the elements on the tensor_array.
                                +                 -> Tensor v'1 Data.Int.Int32 -- ^ __size__: The size of the array.
                                +                 -> m' ((Tensor Value ResourceHandle, Tensor Value Float))
                                +                 -- ^ (__handle__, __flow__)
                                +                 --
                                +                 -- * __handle__: The handle to the TensorArray.
                                +                 --
                                +                 -- * __flow__: A scalar used to control gradient flow.
                                +tensorArrayV3 = tensorArrayV3' id
                                +tensorArrayV3' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                  DataType -- ^ __dtype__: The type of the elements on the tensor_array.
                                +                  -> Tensor v'1 Data.Int.Int32 -- ^ __size__: The size of the array.
                                +                  -> m' ((Tensor Value ResourceHandle, Tensor Value Float))
                                +                  -- ^ (__handle__, __flow__)
                                +                  --
                                +                  -- * __handle__: The handle to the TensorArray.
                                +                  --
                                +                  -- * __flow__: A scalar used to control gradient flow.
                                +tensorArrayV3' op'options dtype size | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size]
                                +        buildOp [] (opDef "TensorArrayV3"
                                +                    & opAttr "dtype" .~ dtype
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "size" description: "The size of the array." type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "handle"
                                +  description: "The handle to the TensorArray."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg {
                                +  name: "flow"
                                +  description: "A scalar used to control gradient flow."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the elements on the tensor_array."
                                +}
                                +attr {
                                +  name: "element_shape"
                                +  type: "shape"
                                +  default_value { shape { unknown_rank: true } }
                                +  description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error."
                                +}
                                +attr {
                                +  name: "dynamic_size"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "A boolean that determines whether writes to the TensorArray\nare allowed to grow the size.  By default, this is not allowed."
                                +}
                                +attr {
                                +  name: "clear_after_read"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If true (default), Tensors in the TensorArray are cleared\nafter being read.  This disables multiple read semantics but allows early\nrelease of memory."
                                +}
                                +attr {
                                +  name: "tensor_array_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "Overrides the name used for the temporary tensor_array\nresource. Default value is the name of the \'TensorArray\' op (which\nis guaranteed unique)."
                                +}
                                +-}
                                +
                                +-- | 
                                +
                                +tensorArrayWrite :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => 
                                +                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                    -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                    -> Tensor v'3 t -- ^ __value__
                                +                    -> Tensor v'4 Float -- ^ __flow_in__
                                +                    -> m' (Tensor Value Float) -- ^ __flow_out__
                                +tensorArrayWrite = tensorArrayWrite' id
                                +tensorArrayWrite' :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) =>
                                +                     OpParams ->
                                +                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                                +                     -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                     -> Tensor v'3 t -- ^ __value__
                                +                     -> Tensor v'4 Float -- ^ __flow_in__
                                +                     -> m' (Tensor Value Float) -- ^ __flow_out__
                                +tensorArrayWrite' op'options handle index value flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs index,
                                +                                                             buildInputs value,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayWrite"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING is_ref: true }
                                +input_arg { name: "index" type: DT_INT32 }
                                +input_arg { name: "value" type_attr: "T" }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "flow_out" type: DT_FLOAT }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Deprecated. Use TensorArrayGradV3
                                +
                                +tensorArrayWriteV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => 
                                +                      Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                      -> Tensor v'3 t -- ^ __value__
                                +                      -> Tensor v'4 Float -- ^ __flow_in__
                                +                      -> Tensor Build Float -- ^ __flow_out__
                                +tensorArrayWriteV2 = tensorArrayWriteV2' id
                                +tensorArrayWriteV2' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams ->
                                +                       Tensor v'1 Data.ByteString.ByteString -- ^ __handle__
                                +                       -> Tensor v'2 Data.Int.Int32 -- ^ __index__
                                +                       -> Tensor v'3 t -- ^ __value__
                                +                       -> Tensor v'4 Float -- ^ __flow_in__
                                +                       -> Tensor Build Float -- ^ __flow_out__
                                +tensorArrayWriteV2' op'options handle index value flow_in | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs index,
                                +                                                             buildInputs value,
                                +                                                             buildInputs flow_in]
                                +        return (opDef "TensorArrayWriteV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "handle" type: DT_STRING }
                                +input_arg { name: "index" type: DT_INT32 }
                                +input_arg { name: "value" type_attr: "T" }
                                +input_arg { name: "flow_in" type: DT_FLOAT }
                                +output_arg { name: "flow_out" type: DT_FLOAT }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Push an element onto the tensor_array.
                                +
                                +tensorArrayWriteV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                     TensorType t) => 
                                +                      Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                      -> Tensor v'2 Data.Int.Int32 -- ^ __index__: The position to write to inside the TensorArray.
                                +                      -> Tensor v'3 t -- ^ __value__: The tensor to write to the TensorArray.
                                +                      -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                      -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
                                +tensorArrayWriteV3 = tensorArrayWriteV3' id
                                +tensorArrayWriteV3' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m',
                                +                                                      TensorType t) =>
                                +                       OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray.
                                +                       -> Tensor v'2 Data.Int.Int32 -- ^ __index__: The position to write to inside the TensorArray.
                                +                       -> Tensor v'3 t -- ^ __value__: The tensor to write to the TensorArray.
                                +                       -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
                                +                       -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
                                +tensorArrayWriteV3' op'options handle index value flow_in | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle,
                                +                                                             buildInputs index,
                                +                                                             buildInputs value,
                                +                                                             buildInputs flow_in]
                                +        buildOp [] (opDef "TensorArrayWriteV3"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "handle"
                                +  description: "The handle to a TensorArray."
                                +  type: DT_RESOURCE
                                +}
                                +input_arg {
                                +  name: "index"
                                +  description: "The position to write to inside the TensorArray."
                                +  type: DT_INT32
                                +}
                                +input_arg {
                                +  name: "value"
                                +  description: "The tensor to write to the TensorArray."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "flow_in"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "flow_out"
                                +  description: "A float scalar that enforces proper chaining of operations."
                                +  type: DT_FLOAT
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Creates a dataset that emits `components` as a tuple of tensors once.
                                +
                                +tensorDataset :: forall v'1 toutput_types m' . (MonadBuild m',
                                +                                                TensorTypes toutput_types) => 
                                +                 TensorList (v'1) toutput_types -- ^ __components__
                                +                 -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +tensorDataset = tensorDataset' id
                                +tensorDataset' :: forall v'1 toutput_types m' . (MonadBuild m',
                                +                                                 TensorTypes toutput_types) =>
                                +                  OpParams ->
                                +                  TensorList (v'1) toutput_types -- ^ __components__
                                +                  -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +tensorDataset' op'options components | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs components]
                                +        buildOp [] (opDef "TensorDataset"
                                +                    & opAttr "Toutput_types" .~ fromTensorTypes (Proxy :: Proxy toutput_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "components" type_list_attr: "Toutput_types" }
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "Toutput_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that emits each dim-0 slice of `components` once.
                                +
                                +tensorSliceDataset :: forall v'1 toutput_types m' . (MonadBuild m',
                                +                                                     TensorTypes toutput_types) =>
                                +                      
                                +                      TensorList (v'1) toutput_types -- ^ __components__
                                +                      -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +tensorSliceDataset = tensorSliceDataset' id
                                +tensorSliceDataset' :: forall v'1 toutput_types m' . (MonadBuild m',
                                +                                                      TensorTypes toutput_types) =>
                                +                       OpParams ->
                                +                       TensorList (v'1) toutput_types -- ^ __components__
                                +                       -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +tensorSliceDataset' op'options components | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs components]
                                +        buildOp [] (opDef "TensorSliceDataset"
                                +                    & opAttr "Toutput_types" .~ fromTensorTypes (Proxy :: Proxy toutput_types)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "components" type_list_attr: "Toutput_types" }
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "Toutput_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +-}
                                +
                                +-- | Outputs a `Summary` protocol buffer with a tensor.
                                +--
                                +-- This op is being phased out in favor of TensorSummaryV2, which lets callers pass
                                +-- a tag as well as a serialized SummaryMetadata proto string that contains
                                +-- plugin-specific data. We will keep this op to maintain backwards compatibility.
                                +tensorSummary :: forall v'1 t . (TensorType t) => 
                                +                 Tensor v'1 t -- ^ __tensor__: A tensor to serialize.
                                +                 -> Tensor Build Data.ByteString.ByteString -- ^ __summary__
                                +tensorSummary = tensorSummary' id
                                +tensorSummary' :: forall v'1 t . (TensorType t) => OpParams ->
                                +                  Tensor v'1 t -- ^ __tensor__: A tensor to serialize.
                                +                  -> Tensor Build Data.ByteString.ByteString -- ^ __summary__
                                +tensorSummary' op'options tensor | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor]
                                +        return (opDef "TensorSummary"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tensor" description: "A tensor to serialize." type_attr: "T"
                                +}
                                +output_arg { name: "summary" type: DT_STRING }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "description"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "A json-encoded SummaryDescription proto."
                                +}
                                +attr {
                                +  name: "labels"
                                +  type: "list(string)"
                                +  default_value { list { } }
                                +  description: "An unused list of strings."
                                +}
                                +attr {
                                +  name: "display_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "An unused string."
                                +}
                                +-}
                                +
                                +-- | Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
                                +
                                +tensorSummaryV2 :: forall v'1 v'2 v'3 t . (TensorType t) => 
                                +                   Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: A string attached to this summary. Used for organization in TensorBoard.
                                +                   -> Tensor v'2 t -- ^ __tensor__: A tensor to serialize.
                                +                   -> Tensor v'3 Data.ByteString.ByteString -- ^ __serialized_summary_metadata__: A serialized SummaryMetadata proto. Contains plugin
                                +                                                            -- data.
                                +                   -> Tensor Build Data.ByteString.ByteString -- ^ __summary__
                                +tensorSummaryV2 = tensorSummaryV2' id
                                +tensorSummaryV2' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams ->
                                +                    Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: A string attached to this summary. Used for organization in TensorBoard.
                                +                    -> Tensor v'2 t -- ^ __tensor__: A tensor to serialize.
                                +                    -> Tensor v'3 Data.ByteString.ByteString -- ^ __serialized_summary_metadata__: A serialized SummaryMetadata proto. Contains plugin
                                +                                                             -- data.
                                +                    -> Tensor Build Data.ByteString.ByteString -- ^ __summary__
                                +tensorSummaryV2' op'options tag tensor
                                +                 serialized_summary_metadata | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag,
                                +                                                             buildInputs tensor,
                                +                                                             buildInputs serialized_summary_metadata]
                                +        return (opDef "TensorSummaryV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tag"
                                +  description: "A string attached to this summary. Used for organization in TensorBoard."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "tensor" description: "A tensor to serialize." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "serialized_summary_metadata"
                                +  description: "A serialized SummaryMetadata proto. Contains plugin\ndata."
                                +  type: DT_STRING
                                +}
                                +output_arg { name: "summary" type: DT_STRING }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Creates a dataset that emits the lines of one or more text files.
                                +
                                +textLineDataset :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +                   Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__: A scalar or a vector containing the name(s) of the file(s) to be
                                +                                                         -- read.
                                +                   -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__: A scalar containing either (i) the empty string (no
                                +                                                            -- compression), (ii) "ZLIB", or (iii) "GZIP".
                                +                   -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +textLineDataset = textLineDataset' id
                                +textLineDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +                    Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__: A scalar or a vector containing the name(s) of the file(s) to be
                                +                                                          -- read.
                                +                    -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__: A scalar containing either (i) the empty string (no
                                +                                                             -- compression), (ii) "ZLIB", or (iii) "GZIP".
                                +                    -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +textLineDataset' op'options filenames compression_type | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames,
                                +                                                             buildInputs compression_type]
                                +        buildOp [] (opDef "TextLineDataset"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "filenames"
                                +  description: "A scalar or a vector containing the name(s) of the file(s) to be\nread."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "compression_type"
                                +  description: "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\"."
                                +  type: DT_STRING
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +-}
                                +
                                +-- | A Reader that outputs the lines of a file delimited by '\n'.
                                +
                                +textLineReader :: forall m' . (MonadBuild m') => 
                                +                  m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +textLineReader = textLineReader' id
                                +textLineReader' :: forall m' . (MonadBuild m') => OpParams ->
                                +                   m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +textLineReader' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "TextLineReader"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "skip_header_lines"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of lines to skip from the beginning of every file."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | A Reader that outputs the lines of a file delimited by '\n'.
                                +
                                +textLineReaderV2 :: forall m' . (MonadBuild m') => 
                                +                    m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +textLineReaderV2 = textLineReaderV2' id
                                +textLineReaderV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                     m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +textLineReaderV2' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "TextLineReaderV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "skip_header_lines"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Number of lines to skip from the beginning of every file."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | Generates labels for candidate sampling with a learned unigram distribution.
                                +--
                                +-- See explanations of candidate sampling and the data formats at
                                +-- go/candidate-sampling.
                                +-- 
                                +-- For each batch, this op picks a single set of sampled candidate labels.
                                +-- 
                                +-- The advantages of sampling candidates per-batch are simplicity and the
                                +-- possibility of efficient dense matrix multiplication. The disadvantage is that
                                +-- the sampled candidates must be chosen independently of the context and of the
                                +-- true labels.
                                +threadUnsafeUnigramCandidateSampler :: forall v'1 m' . (MonadBuild m') => 
                                +                                       Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                                       -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                                       -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                                       -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                               -- candidates in a batch are unique. This requires some approximation to
                                +                                               -- estimate the post-rejection sampling probabilities.
                                +                                       -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                                    -- IDs of the num_true target_classes in the corresponding original label.
                                +                                       -> m' ((Tensor Value Data.Int.Int64,
                                +                                               Tensor Value Float,
                                +                                               Tensor Value Float))
                                +                                       -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                                       --
                                +                                       -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                                       -- the ID of a sampled candidate.
                                +                                       --
                                +                                       -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                                       -- the number of times each candidate is expected to occur in a batch
                                +                                       -- of sampled candidates. If unique=true, then this is a probability.
                                +                                       --
                                +                                       -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                                       -- candidate representing the number of times the candidate is expected
                                +                                       -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                                       -- probability.
                                +threadUnsafeUnigramCandidateSampler = threadUnsafeUnigramCandidateSampler' id
                                +threadUnsafeUnigramCandidateSampler' :: forall v'1 m' . (MonadBuild m') =>
                                +                                        OpParams ->
                                +                                        Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                                        -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                                        -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                                        -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                                -- candidates in a batch are unique. This requires some approximation to
                                +                                                -- estimate the post-rejection sampling probabilities.
                                +                                        -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                                     -- IDs of the num_true target_classes in the corresponding original label.
                                +                                        -> m' ((Tensor Value Data.Int.Int64,
                                +                                                Tensor Value Float,
                                +                                                Tensor Value Float))
                                +                                        -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                                        --
                                +                                        -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                                        -- the ID of a sampled candidate.
                                +                                        --
                                +                                        -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                                        -- the number of times each candidate is expected to occur in a batch
                                +                                        -- of sampled candidates. If unique=true, then this is a probability.
                                +                                        --
                                +                                        -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                                        -- candidate representing the number of times the candidate is expected
                                +                                        -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                                        -- probability.
                                +threadUnsafeUnigramCandidateSampler' op'options num_sampled num_true range_max
                                +                                     unique true_classes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes]
                                +        buildOp [] (opDef "ThreadUnsafeUnigramCandidateSampler"
                                +                    & opAttr "num_sampled" .~ num_sampled
                                +                    & opAttr "num_true" .~ num_true
                                +                    & opAttr "range_max" .~ range_max
                                +                    & opAttr "unique" .~ unique
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "true_classes"
                                +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sampled_candidates"
                                +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "true_expected_count"
                                +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "sampled_expected_count"
                                +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_true"
                                +  type: "int"
                                +  description: "Number of true labels per context."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "num_sampled"
                                +  type: "int"
                                +  description: "Number of candidates to randomly sample."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "unique"
                                +  type: "bool"
                                +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
                                +}
                                +attr {
                                +  name: "range_max"
                                +  type: "int"
                                +  description: "The sampler will sample integers from the interval [0, range_max)."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +-}
                                +
                                +-- | Constructs a tensor by tiling a given tensor.
                                +--
                                +-- This operation creates a new tensor by replicating `input` `multiples` times.
                                +-- The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
                                +-- and the values of `input` are replicated `multiples[i]` times along the 'i'th
                                +-- dimension. For example, tiling `[a b c d]` by `[2]` produces
                                +-- `[a b c d a b c d]`.
                                +tile :: forall v'1 v'2 t tmultiples . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] tmultiples) =>
                                +        
                                +        Tensor v'1 t -- ^ __input__: 1-D or higher.
                                +        -> Tensor v'2 tmultiples -- ^ __multiples__: 1-D. Length must be the same as the number of dimensions in `input`
                                +        -> Tensor Build t -- ^ __output__
                                +tile = tile' id
                                +tile' :: forall v'1 v'2 t tmultiples . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] tmultiples) =>
                                +         OpParams ->
                                +         Tensor v'1 t -- ^ __input__: 1-D or higher.
                                +         -> Tensor v'2 tmultiples -- ^ __multiples__: 1-D. Length must be the same as the number of dimensions in `input`
                                +         -> Tensor Build t -- ^ __output__
                                +tile' op'options input multiples | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs multiples]
                                +        return (opDef "Tile"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tmultiples" .~ tensorType (undefined :: tmultiples)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "1-D or higher." type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "multiples"
                                +  description: "1-D. Length must be the same as the number of dimensions in `input`"
                                +  type_attr: "Tmultiples"
                                +}
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tmultiples"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Returns the gradient of `Tile`.
                                +--
                                +-- Since `Tile` takes an input and repeats the input `multiples` times
                                +-- along each dimension, `TileGrad` takes in `multiples` and aggregates
                                +-- each repeated tile of `input` into `output`.
                                +tileGrad :: forall v'1 v'2 t . (TensorType t) => 
                                +            Tensor v'1 t -- ^ __input__
                                +            -> Tensor v'2 Data.Int.Int32 -- ^ __multiples__
                                +            -> Tensor Build t -- ^ __output__
                                +tileGrad = tileGrad' id
                                +tileGrad' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +             Tensor v'1 t -- ^ __input__
                                +             -> Tensor v'2 Data.Int.Int32 -- ^ __multiples__
                                +             -> Tensor Build t -- ^ __output__
                                +tileGrad' op'options input multiples | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs multiples]
                                +        return (opDef "TileGrad"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_attr: "T" }
                                +input_arg { name: "multiples" type: DT_INT32 }
                                +output_arg { name: "output" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Finds values and indices of the `k` largest elements for the last dimension.
                                +--
                                +-- If the input is a vector (rank-1), finds the `k` largest entries in the vector
                                +-- and outputs their values and indices as vectors.  Thus `values[j]` is the
                                +-- `j`-th largest entry in `input`, and its index is `indices[j]`.
                                +-- 
                                +-- For matrices (resp. higher rank input), computes the top `k` entries in each
                                +-- row (resp. vector along the last dimension).  Thus,
                                +-- 
                                +--     values.shape = indices.shape = input.shape[:-1] + [k]
                                +-- 
                                +-- If two elements are equal, the lower-index element appears first.
                                +-- 
                                +-- If `k` varies dynamically, use `TopKV2` below.
                                +topK :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64,
                                +                                Data.Int.Int8, Data.Word.Word16,
                                +                                Data.Word.Word8, Double, Float] t) => 
                                +        Data.Int.Int64 -- ^ __k__: Number of top elements to look for along the last dimension (along each
                                +                       -- row for matrices).
                                +        -> Tensor v'1 t -- ^ __input__: 1-D or higher with last dimension at least `k`.
                                +        -> (Tensor Build t, Tensor Build Data.Int.Int32)
                                +        -- ^ (__values__, __indices__)
                                +        --
                                +        -- * __values__: The `k` largest elements along each last dimensional slice.
                                +        --
                                +        -- * __indices__: The indices of `values` within the last dimension of `input`.
                                +topK = topK' id
                                +topK' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64,
                                +                                 Data.Int.Int8, Data.Word.Word16,
                                +                                 Data.Word.Word8, Double, Float] t) =>
                                +         OpParams ->
                                +         Data.Int.Int64 -- ^ __k__: Number of top elements to look for along the last dimension (along each
                                +                        -- row for matrices).
                                +         -> Tensor v'1 t -- ^ __input__: 1-D or higher with last dimension at least `k`.
                                +         -> (Tensor Build t, Tensor Build Data.Int.Int32)
                                +         -- ^ (__values__, __indices__)
                                +         --
                                +         -- * __values__: The `k` largest elements along each last dimensional slice.
                                +         --
                                +         -- * __indices__: The indices of `values` within the last dimension of `input`.
                                +topK' op'options k input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "TopK"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "k" .~ k
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "1-D or higher with last dimension at least `k`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "values"
                                +  description: "The `k` largest elements along each last dimensional slice."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "indices"
                                +  description: "The indices of `values` within the last dimension of `input`."
                                +  type: DT_INT32
                                +}
                                +attr {
                                +  name: "k"
                                +  type: "int"
                                +  description: "Number of top elements to look for along the last dimension (along each\nrow for matrices)."
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "sorted"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If true the resulting `k` elements will be sorted by the values in\ndescending order."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Finds values and indices of the `k` largest elements for the last dimension.
                                +--
                                +-- If the input is a vector (rank-1), finds the `k` largest entries in the vector
                                +-- and outputs their values and indices as vectors.  Thus `values[j]` is the
                                +-- `j`-th largest entry in `input`, and its index is `indices[j]`.
                                +-- 
                                +-- For matrices (resp. higher rank input), computes the top `k` entries in each
                                +-- row (resp. vector along the last dimension).  Thus,
                                +-- 
                                +--     values.shape = indices.shape = input.shape[:-1] + [k]
                                +-- 
                                +-- If two elements are equal, the lower-index element appears first.
                                +topKV2 :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                      Data.Int.Int64, Data.Int.Int8,
                                +                                      Data.Word.Word16, Data.Word.Word8, Double,
                                +                                      Float] t) => 
                                +          Tensor v'1 t -- ^ __input__: 1-D or higher with last dimension at least `k`.
                                +          -> Tensor v'2 Data.Int.Int32 -- ^ __k__: 0-D.  Number of top elements to look for along the last dimension (along each
                                +                                       -- row for matrices).
                                +          -> (Tensor Build t, Tensor Build Data.Int.Int32)
                                +          -- ^ (__values__, __indices__)
                                +          --
                                +          -- * __values__: The `k` largest elements along each last dimensional slice.
                                +          --
                                +          -- * __indices__: The indices of `values` within the last dimension of `input`.
                                +topKV2 = topKV2' id
                                +topKV2' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32,
                                +                                       Data.Int.Int64, Data.Int.Int8,
                                +                                       Data.Word.Word16, Data.Word.Word8,
                                +                                       Double, Float] t) => OpParams ->
                                +           Tensor v'1 t -- ^ __input__: 1-D or higher with last dimension at least `k`.
                                +           -> Tensor v'2 Data.Int.Int32 -- ^ __k__: 0-D.  Number of top elements to look for along the last dimension (along each
                                +                                        -- row for matrices).
                                +           -> (Tensor Build t, Tensor Build Data.Int.Int32)
                                +           -- ^ (__values__, __indices__)
                                +           --
                                +           -- * __values__: The `k` largest elements along each last dimensional slice.
                                +           --
                                +           -- * __indices__: The indices of `values` within the last dimension of `input`.
                                +topKV2' op'options input k | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input,
                                +                                                             buildInputs k]
                                +        return (opDef "TopKV2"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input"
                                +  description: "1-D or higher with last dimension at least `k`."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "k"
                                +  description: "0-D.  Number of top elements to look for along the last dimension (along each\nrow for matrices)."
                                +  type: DT_INT32
                                +}
                                +output_arg {
                                +  name: "values"
                                +  description: "The `k` largest elements along each last dimensional slice."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "indices"
                                +  description: "The indices of `values` within the last dimension of `input`."
                                +  type: DT_INT32
                                +}
                                +attr {
                                +  name: "sorted"
                                +  type: "bool"
                                +  default_value { b: true }
                                +  description: "If true the resulting `k` elements will be sorted by the values in\ndescending order."
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Shuffle dimensions of x according to a permutation.
                                +--
                                +-- The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
                                +--   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
                                +transpose :: forall v'1 v'2 t tperm . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                             Data.Int.Int64] tperm) =>
                                +             
                                +             Tensor v'1 t -- ^ __x__
                                +             -> Tensor v'2 tperm -- ^ __perm__
                                +             -> Tensor Build t -- ^ __y__
                                +transpose = transpose' id
                                +transpose' :: forall v'1 v'2 t tperm . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                              Data.Int.Int64] tperm) =>
                                +              OpParams ->
                                +              Tensor v'1 t -- ^ __x__
                                +              -> Tensor v'2 tperm -- ^ __perm__
                                +              -> Tensor Build t -- ^ __y__
                                +transpose' op'options x perm | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs perm]
                                +        return (opDef "Transpose"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tperm" .~ tensorType (undefined :: tperm)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "perm" type_attr: "Tperm" }
                                +output_arg { name: "y" type_attr: "T" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "Tperm"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Returns x / y element-wise for integer types.
                                +--
                                +-- Truncation designates that negative numbers will round fractional quantities
                                +-- toward zero. I.e. -7 / 5 = 1. This matches C semantics but it is different
                                +-- than Python semantics. See `FloorDiv` for a division function that matches
                                +-- Python Semantics.
                                +-- 
                                +-- *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +truncateDiv :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                           (Data.Complex.Complex Float),
                                +                                           Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16, Data.Word.Word8,
                                +                                           Double, Float] t) => 
                                +               Tensor v'1 t -- ^ __x__
                                +               -> Tensor v'2 t -- ^ __y__
                                +               -> Tensor Build t -- ^ __z__
                                +truncateDiv = truncateDiv' id
                                +truncateDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                +                                            (Data.Complex.Complex Float),
                                +                                            Data.Int.Int16, Data.Int.Int32,
                                +                                            Data.Int.Int64, Data.Int.Int8,
                                +                                            Data.Word.Word16, Data.Word.Word8,
                                +                                            Double, Float] t) => OpParams ->
                                +                Tensor v'1 t -- ^ __x__
                                +                -> Tensor v'2 t -- ^ __y__
                                +                -> Tensor Build t -- ^ __z__
                                +truncateDiv' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "TruncateDiv"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_HALF
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_UINT8
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Returns element-wise remainder of division. This emulates C semantics in that
                                +--
                                +-- the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
                                +-- y + truncate_mod(x, y) = x`.
                                +-- 
                                +-- *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
                                +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
                                +truncateMod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                           Double, Float] t) => 
                                +               Tensor v'1 t -- ^ __x__
                                +               -> Tensor v'2 t -- ^ __y__
                                +               -> Tensor Build t -- ^ __z__
                                +truncateMod = truncateMod' id
                                +truncateMod' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                +                                            Double, Float] t) => OpParams ->
                                +                Tensor v'1 t -- ^ __x__
                                +                -> Tensor v'2 t -- ^ __y__
                                +                -> Tensor Build t -- ^ __z__
                                +truncateMod' op'options x y | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs y]
                                +        return (opDef "TruncateMod"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "y" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
                                +    }
                                +  }
                                +}
                                +-}
                                +
                                +-- | Outputs random values from a truncated normal distribution.
                                +--
                                +-- The generated values follow a normal distribution with mean 0 and standard
                                +-- deviation 1, except that values whose magnitude is more than 2 standard
                                +-- deviations from the mean are dropped and re-picked.
                                +truncatedNormal :: forall v'1 dtype t m' . (MonadBuild m',
                                +                                            OneOf '[Data.Word.Word16, Double,
                                +                                                    Float] dtype,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] t) => 
                                +                   Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                   -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random truncated normal
                                +                   -- values.
                                +truncatedNormal = truncatedNormal' id
                                +truncatedNormal' :: forall v'1 dtype t m' . (MonadBuild m',
                                +                                             OneOf '[Data.Word.Word16, Double,
                                +                                                     Float] dtype,
                                +                                             OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] t) =>
                                +                    OpParams ->
                                +                    Tensor v'1 t -- ^ __shape__: The shape of the output tensor.
                                +                    -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random truncated normal
                                +                    -- values.
                                +truncatedNormal' op'options shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape]
                                +        buildOp [] (opDef "TruncatedNormal"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "shape"
                                +  description: "The shape of the output tensor."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "A tensor of the specified shape filled with random truncated normal\nvalues."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "A second seed to avoid seed collision."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of the output."
                                +  allowed_values {
                                +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
                                +  }
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Generates labels for candidate sampling with a uniform distribution.
                                +--
                                +-- See explanations of candidate sampling and the data formats at
                                +-- go/candidate-sampling.
                                +-- 
                                +-- For each batch, this op picks a single set of sampled candidate labels.
                                +-- 
                                +-- The advantages of sampling candidates per-batch are simplicity and the
                                +-- possibility of efficient dense matrix multiplication. The disadvantage is that
                                +-- the sampled candidates must be chosen independently of the context and of the
                                +-- true labels.
                                +uniformCandidateSampler :: forall v'1 m' . (MonadBuild m') => 
                                +                           Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                           -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                           -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                           -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                   -- candidates in a batch are unique. This requires some approximation to
                                +                                   -- estimate the post-rejection sampling probabilities.
                                +                           -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                        -- IDs of the num_true target_classes in the corresponding original label.
                                +                           -> m' ((Tensor Value Data.Int.Int64,
                                +                                   Tensor Value Float, Tensor Value Float))
                                +                           -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                           --
                                +                           -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                           -- the ID of a sampled candidate.
                                +                           --
                                +                           -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                           -- the number of times each candidate is expected to occur in a batch
                                +                           -- of sampled candidates. If unique=true, then this is a probability.
                                +                           --
                                +                           -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                           -- candidate representing the number of times the candidate is expected
                                +                           -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                           -- probability.
                                +uniformCandidateSampler = uniformCandidateSampler' id
                                +uniformCandidateSampler' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                            Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample.
                                +                            -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
                                +                            -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
                                +                            -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
                                +                                    -- candidates in a batch are unique. This requires some approximation to
                                +                                    -- estimate the post-rejection sampling probabilities.
                                +                            -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
                                +                                                         -- IDs of the num_true target_classes in the corresponding original label.
                                +                            -> m' ((Tensor Value Data.Int.Int64,
                                +                                    Tensor Value Float, Tensor Value Float))
                                +                            -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                                +                            --
                                +                            -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
                                +                            -- the ID of a sampled candidate.
                                +                            --
                                +                            -- * __true_expected_count__: A batch_size * num_true matrix, representing
                                +                            -- the number of times each candidate is expected to occur in a batch
                                +                            -- of sampled candidates. If unique=true, then this is a probability.
                                +                            --
                                +                            -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
                                +                            -- candidate representing the number of times the candidate is expected
                                +                            -- to occur in a batch of sampled candidates.  If unique=true, then this is a
                                +                            -- probability.
                                +uniformCandidateSampler' op'options num_sampled num_true range_max unique
                                +                         true_classes | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes]
                                +        buildOp [] (opDef "UniformCandidateSampler"
                                +                    & opAttr "num_sampled" .~ num_sampled
                                +                    & opAttr "num_true" .~ num_true
                                +                    & opAttr "range_max" .~ range_max
                                +                    & opAttr "unique" .~ unique
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "true_classes"
                                +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "sampled_candidates"
                                +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
                                +  type: DT_INT64
                                +}
                                +output_arg {
                                +  name: "true_expected_count"
                                +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
                                +  type: DT_FLOAT
                                +}
                                +output_arg {
                                +  name: "sampled_expected_count"
                                +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
                                +  type: DT_FLOAT
                                +}
                                +attr {
                                +  name: "num_true"
                                +  type: "int"
                                +  description: "Number of true labels per context."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "num_sampled"
                                +  type: "int"
                                +  description: "Number of candidates to randomly sample."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "unique"
                                +  type: "bool"
                                +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
                                +}
                                +attr {
                                +  name: "range_max"
                                +  type: "int"
                                +  description: "The sampler will sample integers from the interval [0, range_max)."
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "seed"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
                                +}
                                +attr {
                                +  name: "seed2"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "An second seed to avoid seed collision."
                                +}
                                +-}
                                +
                                +-- | Finds unique elements in a 1-D tensor.
                                +--
                                +-- This operation returns a tensor `y` containing all of the unique elements of `x`
                                +-- sorted in the same order that they occur in `x`. This operation also returns a
                                +-- tensor `idx` the same size as `x` that contains the index of each value of `x`
                                +-- in the unique output `y`. In other words:
                                +-- 
                                +-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
                                +-- y, idx = unique(x)
                                +-- y ==> [1, 2, 4, 7, 8]
                                +-- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
                                +-- ```
                                +unique :: forall v'1 t out_idx . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                        Data.Int.Int64] out_idx) =>
                                +          
                                +          Tensor v'1 t -- ^ __x__: 1-D.
                                +          -> (Tensor Build t, Tensor Build out_idx) -- ^ (__y__, __idx__)
                                +          --
                                +          -- * __y__: 1-D.
                                +          --
                                +          -- * __idx__: 1-D.
                                +unique = unique' id
                                +unique' :: forall v'1 t out_idx . (TensorType t, OneOf '[Data.Int.Int32,
                                +                                                         Data.Int.Int64] out_idx) =>
                                +           OpParams ->
                                +           Tensor v'1 t -- ^ __x__: 1-D.
                                +           -> (Tensor Build t, Tensor Build out_idx) -- ^ (__y__, __idx__)
                                +           --
                                +           -- * __y__: 1-D.
                                +           --
                                +           -- * __idx__: 1-D.
                                +unique' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "Unique"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "out_idx" .~ tensorType (undefined :: out_idx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" description: "1-D." type_attr: "T" }
                                +output_arg { name: "y" description: "1-D." type_attr: "T" }
                                +output_arg { name: "idx" description: "1-D." type_attr: "out_idx" }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "out_idx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Finds unique elements in a 1-D tensor.
                                +--
                                +-- This operation returns a tensor `y` containing all of the unique elements of `x`
                                +-- sorted in the same order that they occur in `x`. This operation also returns a
                                +-- tensor `idx` the same size as `x` that contains the index of each value of `x`
                                +-- in the unique output `y`. Finally, it returns a third tensor `count` that
                                +-- contains the count of each element of `y` in `x`. In other words:
                                +-- 
                                +-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
                                +-- y, idx, count = unique_with_counts(x)
                                +-- y ==> [1, 2, 4, 7, 8]
                                +-- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
                                +-- count ==> [2, 1, 3, 1, 2]
                                +-- ```
                                +uniqueWithCounts :: forall v'1 t out_idx . (TensorType t,
                                +                                            OneOf '[Data.Int.Int32,
                                +                                                    Data.Int.Int64] out_idx) => 
                                +                    Tensor v'1 t -- ^ __x__: 1-D.
                                +                    -> (Tensor Build t, Tensor Build out_idx,
                                +                        Tensor Build out_idx) -- ^ (__y__, __idx__, __count__)
                                +                    --
                                +                    -- * __y__: 1-D.
                                +                    --
                                +                    -- * __idx__: 1-D.
                                +                    --
                                +                    -- * __count__: 1-D.
                                +uniqueWithCounts = uniqueWithCounts' id
                                +uniqueWithCounts' :: forall v'1 t out_idx . (TensorType t,
                                +                                             OneOf '[Data.Int.Int32,
                                +                                                     Data.Int.Int64] out_idx) =>
                                +                     OpParams ->
                                +                     Tensor v'1 t -- ^ __x__: 1-D.
                                +                     -> (Tensor Build t, Tensor Build out_idx,
                                +                         Tensor Build out_idx) -- ^ (__y__, __idx__, __count__)
                                +                     --
                                +                     -- * __y__: 1-D.
                                +                     --
                                +                     -- * __idx__: 1-D.
                                +                     --
                                +                     -- * __count__: 1-D.
                                +uniqueWithCounts' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "UniqueWithCounts"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "out_idx" .~ tensorType (undefined :: out_idx)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" description: "1-D." type_attr: "T" }
                                +output_arg { name: "y" description: "1-D." type_attr: "T" }
                                +output_arg { name: "idx" description: "1-D." type_attr: "out_idx" }
                                +output_arg {
                                +  name: "count" description: "1-D." type_attr: "out_idx"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "out_idx"
                                +  type: "type"
                                +  default_value { type: DT_INT32 }
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
                                +--
                                +-- Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
                                +-- For example, given a tensor of shape `(A, B, C, D)`;
                                +-- 
                                +-- If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
                                +--   and each tensor in `output` will have shape `(B, C, D)`. (Note that the
                                +--   dimension unpacked along is gone, unlike `split`).
                                +-- 
                                +-- If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
                                +--   and each tensor in `output` will have shape `(A, C, D)`.
                                +-- Etc.
                                +-- 
                                +-- This is the opposite of `pack`.
                                +unpack :: forall v'1 t . (TensorType t) => 
                                +          Data.Int.Int64 -- ^ __num__
                                +          -> Tensor v'1 t -- ^ __value__: 1-D or higher, with `axis` dimension size equal to `num`.
                                +          -> [Tensor Build t] -- ^ __output__: The list of tensors unpacked from `value`.
                                +unpack = unpack' id
                                +unpack' :: forall v'1 t . (TensorType t) => OpParams ->
                                +           Data.Int.Int64 -- ^ __num__
                                +           -> Tensor v'1 t -- ^ __value__: 1-D or higher, with `axis` dimension size equal to `num`.
                                +           -> [Tensor Build t] -- ^ __output__: The list of tensors unpacked from `value`.
                                +unpack' op'options num value | eqLengthGuard [] =
                                +    pureOp [num] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value]
                                +        return (opDef "Unpack"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "num" .~ num
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "1-D or higher, with `axis` dimension size equal to `num`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "The list of tensors unpacked from `value`."
                                +  type_attr: "T"
                                +  number_attr: "num"
                                +}
                                +attr { name: "num" type: "int" has_minimum: true }
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "axis"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  description: "Dimension along which to unpack.  Negative values wrap around, so the\nvalid range is `[-R, R)`."
                                +}
                                +-}
                                +
                                +-- | Computes the Max along segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- This operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
                                +-- Instead of computing the sum over segments, it computes the maximum
                                +-- such that:
                                +-- 
                                +-- \\(output_i = \max_j data_j\\) where max is over `j` such
                                +-- that `segment_ids[j] == i`.
                                +-- 
                                +-- If the maximum is empty for a given segment ID `i`, it outputs the smallest possible value for specific numeric type,
                                +--  `output[i] = numeric_limits<T>::min()`.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
                                +-- </div>
                                +unsortedSegmentMax :: forall v'1 v'2 v'3 t tindices . (OneOf '[Data.Int.Int16,
                                +                                                               Data.Int.Int32,
                                +                                                               Data.Int.Int64,
                                +                                                               Data.Int.Int8,
                                +                                                               Data.Word.Word16,
                                +                                                               Data.Word.Word8,
                                +                                                               Double, Float] t,
                                +                                                       OneOf '[Data.Int.Int32,
                                +                                                               Data.Int.Int64] tindices) =>
                                +                      
                                +                      Tensor v'1 t -- ^ __data__
                                +                      -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                             -- first dimension.
                                +                      -> Tensor v'3 Data.Int.Int32 -- ^ __num_segments__
                                +                      -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                      -- has size `num_segments`.
                                +unsortedSegmentMax = unsortedSegmentMax' id
                                +unsortedSegmentMax' :: forall v'1 v'2 v'3 t tindices . (OneOf '[Data.Int.Int16,
                                +                                                                Data.Int.Int32,
                                +                                                                Data.Int.Int64,
                                +                                                                Data.Int.Int8,
                                +                                                                Data.Word.Word16,
                                +                                                                Data.Word.Word8,
                                +                                                                Double,
                                +                                                                Float] t,
                                +                                                        OneOf '[Data.Int.Int32,
                                +                                                                Data.Int.Int64] tindices) =>
                                +                       OpParams ->
                                +                       Tensor v'1 t -- ^ __data__
                                +                       -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
                                +                                              -- first dimension.
                                +                       -> Tensor v'3 Data.Int.Int32 -- ^ __num_segments__
                                +                       -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which
                                +                       -- has size `num_segments`.
                                +unsortedSegmentMax' op'options data' segment_ids
                                +                    num_segments | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs segment_ids,
                                +                                                             buildInputs num_segments]
                                +        return (opDef "UnsortedSegmentMax"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg { name: "num_segments" type: DT_INT32 }
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for dimension 0 which\nhas size `num_segments`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT32
                                +      type: DT_INT64
                                +      type: DT_UINT8
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_UINT16
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Computes the sum along segments of a tensor.
                                +--
                                +-- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
                                +-- segments.
                                +-- 
                                +-- Computes a tensor such that
                                +-- `(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
                                +-- that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
                                +-- need not be sorted and need not cover all values in the full
                                +-- range of valid values.
                                +-- 
                                +-- If the sum is empty for a given segment ID `i`, `output[i] = 0`.
                                +-- 
                                +-- `num_segments` should equal the number of distinct segment IDs.
                                +-- 
                                +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
                                +-- <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
                                +-- </div>
                                +unsortedSegmentSum :: forall v'1 v'2 v'3 t
                                +                      tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                          (Data.Complex.Complex Float),
                                +                                          Data.Int.Int16, Data.Int.Int32,
                                +                                          Data.Int.Int64, Data.Int.Int8,
                                +                                          Data.Word.Word16, Data.Word.Word8,
                                +                                          Double, Float] t,
                                +                                  OneOf '[Data.Int.Int32,
                                +                                          Data.Int.Int64] tindices) => 
                                +                      Tensor v'1 t -- ^ __data__
                                +                      -> Tensor v'2 tindices -- ^ __segment_ids__: A tensor whose shape is a prefix of `data.shape`.
                                +                      -> Tensor v'3 Data.Int.Int32 -- ^ __num_segments__
                                +                      -> Tensor Build t -- ^ __output__: Has same shape as data, except for the first `segment_ids.rank`
                                +                      -- dimensions, which are replaced with a single dimension which has size
                                +                      -- `num_segments`.
                                +unsortedSegmentSum = unsortedSegmentSum' id
                                +unsortedSegmentSum' :: forall v'1 v'2 v'3 t
                                +                       tindices . (OneOf '[(Data.Complex.Complex Double),
                                +                                           (Data.Complex.Complex Float),
                                +                                           Data.Int.Int16, Data.Int.Int32,
                                +                                           Data.Int.Int64, Data.Int.Int8,
                                +                                           Data.Word.Word16, Data.Word.Word8,
                                +                                           Double, Float] t,
                                +                                   OneOf '[Data.Int.Int32,
                                +                                           Data.Int.Int64] tindices) =>
                                +                       OpParams ->
                                +                       Tensor v'1 t -- ^ __data__
                                +                       -> Tensor v'2 tindices -- ^ __segment_ids__: A tensor whose shape is a prefix of `data.shape`.
                                +                       -> Tensor v'3 Data.Int.Int32 -- ^ __num_segments__
                                +                       -> Tensor Build t -- ^ __output__: Has same shape as data, except for the first `segment_ids.rank`
                                +                       -- dimensions, which are replaced with a single dimension which has size
                                +                       -- `num_segments`.
                                +unsortedSegmentSum' op'options data' segment_ids
                                +                    num_segments | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data',
                                +                                                             buildInputs segment_ids,
                                +                                                             buildInputs num_segments]
                                +        return (opDef "UnsortedSegmentSum"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "Tindices" .~ tensorType (undefined :: tindices)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "data" type_attr: "T" }
                                +input_arg {
                                +  name: "segment_ids"
                                +  description: "A tensor whose shape is a prefix of `data.shape`."
                                +  type_attr: "Tindices"
                                +}
                                +input_arg { name: "num_segments" type: DT_INT32 }
                                +output_arg {
                                +  name: "output"
                                +  description: "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`."
                                +  type_attr: "T"
                                +}
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values {
                                +    list {
                                +      type: DT_FLOAT
                                +      type: DT_DOUBLE
                                +      type: DT_INT64
                                +      type: DT_INT32
                                +      type: DT_UINT8
                                +      type: DT_UINT16
                                +      type: DT_INT16
                                +      type: DT_INT8
                                +      type: DT_COMPLEX64
                                +      type: DT_COMPLEX128
                                +      type: DT_QINT8
                                +      type: DT_QUINT8
                                +      type: DT_QINT32
                                +      type: DT_HALF
                                +    }
                                +  }
                                +}
                                +attr {
                                +  name: "Tindices"
                                +  type: "type"
                                +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
                                +}
                                +-}
                                +
                                +-- | Op is similar to a lightweight Dequeue.
                                +--
                                +-- The basic functionality is similar to dequeue with many fewer
                                +-- capabilities and options.  This Op is optimized for performance.
                                +unstage :: forall dtypes m' . (MonadBuild m', TensorTypes dtypes) => 
                                +           m' (TensorList (Value) dtypes) -- ^ __values__
                                +unstage = unstage' id
                                +unstage' :: forall dtypes m' . (MonadBuild m', TensorTypes dtypes) =>
                                +            OpParams ->
                                +            m' (TensorList (Value) dtypes) -- ^ __values__
                                +unstage' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "Unstage"
                                +                    & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "values" type_list_attr: "dtypes" }
                                +attr {
                                +  name: "capacity"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "memory_limit"
                                +  type: "int"
                                +  default_value { i: 0 }
                                +  has_minimum: true
                                +}
                                +attr {
                                +  name: "dtypes" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Creates a handle to a Variable resource.
                                +
                                +varHandleOp :: forall m' . (MonadBuild m') => 
                                +               DataType -- ^ __dtype__: the type of this variable. Must agree with the dtypes
                                +                        -- of all ops using this variable.
                                +               -> Shape -- ^ __shape__: The (possibly partially specified) shape of this variable.
                                +               -> m' (Tensor Value ResourceHandle) -- ^ __resource__
                                +varHandleOp = varHandleOp' id
                                +varHandleOp' :: forall m' . (MonadBuild m') => OpParams ->
                                +                DataType -- ^ __dtype__: the type of this variable. Must agree with the dtypes
                                +                         -- of all ops using this variable.
                                +                -> Shape -- ^ __shape__: The (possibly partially specified) shape of this variable.
                                +                -> m' (Tensor Value ResourceHandle) -- ^ __resource__
                                +varHandleOp' op'options dtype shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "VarHandleOp"
                                +                    & opAttr "dtype" .~ dtype
                                +                    & opAttr "shape" .~ shape
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "resource" type: DT_RESOURCE }
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "the container this variable is placed in."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "the name by which this variable is referred to."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "the type of this variable. Must agree with the dtypes\nof all ops using this variable."
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "The (possibly partially specified) shape of this variable."
                                +}
                                +-}
                                +
                                +-- | Checks whether a resource handle-based variable has been initialized.
                                +
                                +varIsInitializedOp :: forall v'1 m' . (MonadBuild m') => 
                                +                      Tensor v'1 ResourceHandle -- ^ __resource__: the input resource handle.
                                +                      -> m' (Tensor Value Bool) -- ^ __is_initialized__: a scalar boolean which is true if the variable has been
                                +                      -- initialized.
                                +varIsInitializedOp = varIsInitializedOp' id
                                +varIsInitializedOp' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +                       Tensor v'1 ResourceHandle -- ^ __resource__: the input resource handle.
                                +                       -> m' (Tensor Value Bool) -- ^ __is_initialized__: a scalar boolean which is true if the variable has been
                                +                       -- initialized.
                                +varIsInitializedOp' op'options resource | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource]
                                +        buildOp [] (opDef "VarIsInitializedOp"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource"
                                +  description: "the input resource handle."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg {
                                +  name: "is_initialized"
                                +  description: "a scalar boolean which is true if the variable has been\ninitialized."
                                +  type: DT_BOOL
                                +}
                                +-}
                                +
                                +-- | Use VariableV2 instead.
                                +
                                +variable :: forall dtype m' . (MonadBuild m', TensorType dtype) => 
                                +            Shape -- ^ __shape__
                                +            -> m' (Tensor Ref dtype) -- ^ __ref__
                                +variable = variable' id
                                +variable' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams ->
                                +             Shape -- ^ __shape__
                                +             -> m' (Tensor Ref dtype) -- ^ __ref__
                                +variable' op'options shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "Variable"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "shape" .~ shape
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg { name: "ref" type_attr: "dtype" is_ref: true }
                                +attr { name: "shape" type: "shape" }
                                +attr { name: "dtype" type: "type" }
                                +attr { name: "container" type: "string" default_value { s: "" } }
                                +attr { name: "shared_name" type: "string" default_value { s: "" } }
                                +-}
                                +
                                +-- | Holds state in the form of a tensor that persists across steps.
                                +--
                                +-- Outputs a ref to the tensor state so it may be read or modified.
                                +-- TODO(zhifengc/mrry): Adds a pointer to a more detail document
                                +-- about sharing states in tensorflow.
                                +variableV2 :: forall dtype m' . (MonadBuild m', TensorType dtype) => 
                                +              Shape -- ^ __shape__: The shape of the variable tensor.
                                +              -> m' (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor.
                                +variableV2 = variableV2' id
                                +variableV2' :: forall dtype m' . (MonadBuild m', TensorType dtype) =>
                                +               OpParams ->
                                +               Shape -- ^ __shape__: The shape of the variable tensor.
                                +               -> m' (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor.
                                +variableV2' op'options shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "VariableV2"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "shape" .~ shape
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "ref"
                                +  description: "A reference to the variable tensor."
                                +  type_attr: "dtype"
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "The shape of the variable tensor."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The type of elements in the variable tensor."
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | Returns locations of true values in a boolean tensor.
                                +--
                                +-- This operation returns the coordinates of true elements in `input`. The
                                +-- coordinates are returned in a 2-D tensor where the first dimension (rows)
                                +-- represents the number of true elements, and the second dimension (columns)
                                +-- represents the coordinates of the true elements. Keep in mind, the shape of
                                +-- the output tensor can vary depending on how many true values there are in
                                +-- `input`. Indices are output in row-major order.
                                +-- 
                                +-- For example:
                                +-- 
                                +-- ```
                                +-- # 'input' tensor is [[True, False]
                                +-- #                    [True, False]]
                                +-- # 'input' has two true values, so output has two coordinates.
                                +-- # 'input' has rank of 2, so coordinates have two indices.
                                +-- where(input) ==> [[0, 0],
                                +--                   [1, 0]]
                                +-- 
                                +-- # `input` tensor is [[[True, False]
                                +-- #                     [True, False]]
                                +-- #                    [[False, True]
                                +-- #                     [False, True]]
                                +-- #                    [[False, False]
                                +-- #                     [False, True]]]
                                +-- # 'input' has 5 true values, so output has 5 coordinates.
                                +-- # 'input' has rank of 3, so coordinates have three indices.
                                +-- where(input) ==> [[0, 0, 0],
                                +--                   [0, 1, 0],
                                +--                   [1, 0, 1],
                                +--                   [1, 1, 1],
                                +--                   [2, 1, 1]]
                                +-- ```
                                +where' :: 
                                +          Tensor v'1 Bool -- ^ __input__
                                +          -> Tensor Build Data.Int.Int64 -- ^ __index__
                                +where' = where'' id
                                +where'' :: OpParams ->
                                +           Tensor v'1 Bool -- ^ __input__
                                +           -> Tensor Build Data.Int.Int64 -- ^ __index__
                                +where'' op'options input | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "Where"
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type: DT_BOOL }
                                +output_arg { name: "index" type: DT_INT64 }
                                +-}
                                +
                                +-- | A Reader that outputs the entire contents of a file as a value.
                                +--
                                +-- To use, enqueue filenames in a Queue.  The output of ReaderRead will
                                +-- be a filename (key) and the contents of that file (value).
                                +wholeFileReader :: forall m' . (MonadBuild m') => 
                                +                   m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +wholeFileReader = wholeFileReader' id
                                +wholeFileReader' :: forall m' . (MonadBuild m') => OpParams ->
                                +                    m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
                                +wholeFileReader' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "WholeFileReader"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_STRING
                                +  is_ref: true
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | A Reader that outputs the entire contents of a file as a value.
                                +--
                                +-- To use, enqueue filenames in a Queue.  The output of ReaderRead will
                                +-- be a filename (key) and the contents of that file (value).
                                +wholeFileReaderV2 :: forall m' . (MonadBuild m') => 
                                +                     m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +wholeFileReaderV2 = wholeFileReaderV2' id
                                +wholeFileReaderV2' :: forall m' . (MonadBuild m') => OpParams ->
                                +                      m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader.
                                +wholeFileReaderV2' op'options | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "WholeFileReaderV2"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "reader_handle"
                                +  description: "The handle to reference the Reader."
                                +  type: DT_RESOURCE
                                +}
                                +attr {
                                +  name: "container"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
                                +}
                                +attr {
                                +  name: "shared_name"
                                +  type: "string"
                                +  default_value { s: "" }
                                +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
                                +}
                                +-}
                                +
                                +-- | Writes contents to the file at input filename. Creates file and recursively
                                +--
                                +-- creates directory if not existing.
                                +writeFile :: forall v'1 v'2 m' . (MonadBuild m') => 
                                +             Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: scalar. The name of the file to which we write the contents.
                                +             -> Tensor v'2 Data.ByteString.ByteString -- ^ __contents__: scalar. The content to be written to the output file.
                                +             -> m' (ControlNode)
                                +writeFile = writeFile' id
                                +writeFile' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams ->
                                +              Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: scalar. The name of the file to which we write the contents.
                                +              -> Tensor v'2 Data.ByteString.ByteString -- ^ __contents__: scalar. The content to be written to the output file.
                                +              -> m' (ControlNode)
                                +writeFile' op'options filename contents | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename,
                                +                                                             buildInputs contents]
                                +        buildOp [] (opDef "WriteFile"
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "filename"
                                +  description: "scalar. The name of the file to which we write the contents."
                                +  type: DT_STRING
                                +}
                                +input_arg {
                                +  name: "contents"
                                +  description: "scalar. The content to be written to the output file."
                                +  type: DT_STRING
                                +}
                                +-}
                                +
                                +-- | Returns a tensor of zeros with the same shape and type as x.
                                +
                                +zerosLike :: forall v'1 t . (TensorType t) => 
                                +             Tensor v'1 t -- ^ __x__: a tensor of type T.
                                +             -> Tensor Build t -- ^ __y__: a tensor of the same shape and type as x but filled with zeros.
                                +zerosLike = zerosLike' id
                                +zerosLike' :: forall v'1 t . (TensorType t) => OpParams ->
                                +              Tensor v'1 t -- ^ __x__: a tensor of type T.
                                +              -> Tensor Build t -- ^ __y__: a tensor of the same shape and type as x but filled with zeros.
                                +zerosLike' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "ZerosLike"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "x" description: "a tensor of type T." type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "y"
                                +  description: "a tensor of the same shape and type as x but filled with zeros."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +-}
                                +
                                +-- | Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
                                +--
                                +-- The Hurwitz zeta function is defined as:
                                +-- 
                                +-- 
                                +-- \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
                                +zeta :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => 
                                +        Tensor v'1 t -- ^ __x__
                                +        -> Tensor v'2 t -- ^ __q__
                                +        -> Tensor Build t -- ^ __z__
                                +zeta = zeta' id
                                +zeta' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams ->
                                +         Tensor v'1 t -- ^ __x__
                                +         -> Tensor v'2 t -- ^ __q__
                                +         -> Tensor Build t -- ^ __z__
                                +zeta' op'options x q | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                +                                                             buildInputs q]
                                +        return (opDef "Zeta"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "T" }
                                +input_arg { name: "q" type_attr: "T" }
                                +output_arg { name: "z" type_attr: "T" }
                                +attr {
                                +  name: "T"
                                +  type: "type"
                                +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
                                +}
                                +-}
                                +
                                +-- | Creates a dataset that zips together `input_datasets`.
                                +
                                +zipDataset :: forall v'1 m' . (MonadBuild m') => 
                                +              [DataType] -- ^ __output_types__
                                +              -> [Tensor v'1 ResourceHandle] -- ^ __input_datasets__
                                +              -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +zipDataset = zipDataset' id
                                +zipDataset' :: forall v'1 m' . (MonadBuild m') => OpParams ->
                                +               [DataType] -- ^ __output_types__
                                +               -> [Tensor v'1 ResourceHandle] -- ^ __input_datasets__
                                +               -> m' (Tensor Value ResourceHandle) -- ^ __handle__
                                +zipDataset' op'options output_types
                                +            input_datasets | eqLengthGuard [("N", [("input_datasets", length input_datasets)])] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_datasets]
                                +        buildOp [] (opDef "ZipDataset"
                                +                    & opAttr "output_types" .~ output_types
                                +                    & opAttr "N" .~ n
                                +                    & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length input_datasets) :: Int64
                                +{-
                                +input_arg {
                                +  name: "input_datasets" type: DT_RESOURCE number_attr: "N"
                                +}
                                +output_arg { name: "handle" type: DT_RESOURCE }
                                +attr {
                                +  name: "output_types"
                                +  type: "list(type)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr {
                                +  name: "output_shapes"
                                +  type: "list(shape)"
                                +  has_minimum: true
                                +  minimum: 1
                                +}
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | A graph node which represents an argument to a function.
                                +
                                +_Arg :: forall t m' . (MonadBuild m', TensorType t) => 
                                +        Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function.
                                +        -> m' (Tensor Value t) -- ^ __output__: The argument.
                                +_Arg = _Arg' id
                                +_Arg' :: forall t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +         Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function.
                                +         -> m' (Tensor Value t) -- ^ __output__: The argument.
                                +_Arg' op'options index | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "_Arg"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "index" .~ index
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "output" description: "The argument." type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "index"
                                +  type: "int"
                                +  description: "This argument is the index-th argument of the function."
                                +  has_minimum: true
                                +}
                                +-}
                                +
                                +-- | Converts an array of tensors to a list of tensors.
                                +
                                +_ArrayToList :: forall v'1 t out_types . (TensorType t,
                                +                                          TensorTypes out_types) => 
                                +                [Tensor v'1 t] -- ^ __input__
                                +                -> TensorList (Build) out_types -- ^ __output__
                                +_ArrayToList = _ArrayToList' id
                                +_ArrayToList' :: forall v'1 t out_types . (TensorType t,
                                +                                           TensorTypes out_types) => OpParams ->
                                +                 [Tensor v'1 t] -- ^ __input__
                                +                 -> TensorList (Build) out_types -- ^ __output__
                                +_ArrayToList' op'options
                                +              input | eqLengthGuard [("N", [("input", length input)])] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "_ArrayToList"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "out_types" .~ fromTensorTypes (Proxy :: Proxy out_types)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +  where
                                +    n = fromIntegral (length input) :: Int64
                                +{-
                                +input_arg { name: "input" type_attr: "T" number_attr: "N" }
                                +output_arg { name: "output" type_list_attr: "out_types" }
                                +attr { name: "T" type: "type" }
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +attr {
                                +  name: "out_types" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +-}
                                +
                                +-- | Cast x of type SrcT to y of DstT.
                                +--
                                +-- _HostCast requires its input and produces its output in host memory.
                                +_HostCast :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) => 
                                +             Tensor v'1 srcT -- ^ __x__
                                +             -> Tensor Build dstT -- ^ __y__
                                +_HostCast = _HostCast' id
                                +_HostCast' :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) =>
                                +              OpParams ->
                                +              Tensor v'1 srcT -- ^ __x__
                                +              -> Tensor Build dstT -- ^ __y__
                                +_HostCast' op'options x | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
                                +        return (opDef "_HostCast"
                                +                & opAttr "SrcT" .~ tensorType (undefined :: srcT)
                                +                & opAttr "DstT" .~ tensorType (undefined :: dstT)
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "x" type_attr: "SrcT" }
                                +output_arg { name: "y" type_attr: "DstT" }
                                +attr { name: "SrcT" type: "type" }
                                +attr { name: "DstT" type: "type" }
                                +-}
                                +
                                +-- | Receives the named tensor from send_device on recv_device.
                                +--
                                +-- _HostRecv requires its input on host memory whereas _Recv requires its
                                +-- input on device memory.
                                +_HostRecv :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => 
                                +             Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
                                +             -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive.
                                +_HostRecv = _HostRecv' id
                                +_HostRecv' :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) =>
                                +              OpParams ->
                                +              Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
                                +              -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive.
                                +_HostRecv' op'options send_device_incarnation | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "_HostRecv"
                                +                    & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type)
                                +                    & opAttr "send_device_incarnation" .~ send_device_incarnation
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "tensor"
                                +  description: "The tensor to receive."
                                +  type_attr: "tensor_type"
                                +}
                                +attr { name: "tensor_type" type: "type" }
                                +attr {
                                +  name: "tensor_name"
                                +  type: "string"
                                +  description: "The name of the tensor to receive."
                                +}
                                +attr {
                                +  name: "send_device"
                                +  type: "string"
                                +  description: "The name of the device sending the tensor."
                                +}
                                +attr {
                                +  name: "send_device_incarnation"
                                +  type: "int"
                                +  description: "The current incarnation of send_device."
                                +}
                                +attr {
                                +  name: "recv_device"
                                +  type: "string"
                                +  description: "The name of the device receiving the tensor."
                                +}
                                +attr {
                                +  name: "client_terminated"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
                                +}
                                +-}
                                +
                                +-- | Sends the named tensor from send_device to recv_device.
                                +--
                                +-- _HostSend requires its input on host memory whereas _Send requires its
                                +-- input on device memory.
                                +_HostSend :: forall v'1 t m' . (MonadBuild m', TensorType t) => 
                                +             Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
                                +             -> Tensor v'1 t -- ^ __tensor__: The tensor to send.
                                +             -> m' (ControlNode)
                                +_HostSend = _HostSend' id
                                +_HostSend' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +              Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
                                +              -> Tensor v'1 t -- ^ __tensor__: The tensor to send.
                                +              -> m' (ControlNode)
                                +_HostSend' op'options send_device_incarnation tensor | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor]
                                +        buildOp [] (opDef "_HostSend"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "send_device_incarnation" .~ send_device_incarnation
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tensor" description: "The tensor to send." type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "tensor_name"
                                +  type: "string"
                                +  description: "The name of the tensor to send."
                                +}
                                +attr {
                                +  name: "send_device"
                                +  type: "string"
                                +  description: "The name of the device sending the tensor."
                                +}
                                +attr {
                                +  name: "send_device_incarnation"
                                +  type: "int"
                                +  description: "The current incarnation of send_device."
                                +}
                                +attr {
                                +  name: "recv_device"
                                +  type: "string"
                                +  description: "The name of the device receiving the tensor."
                                +}
                                +attr {
                                +  name: "client_terminated"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
                                +}
                                +-}
                                +
                                +-- | Converts a list of tensors to an array of tensors.
                                +
                                +_ListToArray :: forall v'1 tin t . (TensorTypes tin, TensorType t) => 
                                +                Data.Int.Int64 -- ^ __N__
                                +                -> TensorList (v'1) tin -- ^ __input__
                                +                -> [Tensor Build t] -- ^ __output__
                                +_ListToArray = _ListToArray' id
                                +_ListToArray' :: forall v'1 tin t . (TensorTypes tin, TensorType t) =>
                                +                 OpParams ->
                                +                 Data.Int.Int64 -- ^ __N__
                                +                 -> TensorList (v'1) tin -- ^ __input__
                                +                 -> [Tensor Build t] -- ^ __output__
                                +_ListToArray' op'options n input | eqLengthGuard [] =
                                +    pureOp [n] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        return (opDef "_ListToArray"
                                +                & opAttr "Tin" .~ fromTensorTypes (Proxy :: Proxy tin)
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "N" .~ n
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg { name: "input" type_list_attr: "Tin" }
                                +output_arg { name: "output" type_attr: "T" number_attr: "N" }
                                +attr {
                                +  name: "Tin" type: "list(type)" has_minimum: true minimum: 1
                                +}
                                +attr { name: "T" type: "type" }
                                +attr { name: "N" type: "int" has_minimum: true minimum: 1 }
                                +-}
                                +
                                +-- | Creates an empty Tensor with shape `shape` and type `dtype`.
                                +--
                                +-- The memory can optionally be initialized. This is usually useful in
                                +-- conjunction with inplace operations.
                                +_ParallelConcatStart :: forall dtype m' . (MonadBuild m', TensorType dtype) => 
                                +                        Shape -- ^ __shape__: 1-D `Tensor` indicating the shape of the output.
                                +                        -> m' (Tensor Value dtype) -- ^ __output__: An empty Tensor of the specified type.
                                +_ParallelConcatStart = _ParallelConcatStart' id
                                +_ParallelConcatStart' :: forall dtype m' . (MonadBuild m', TensorType dtype) =>
                                +                         OpParams ->
                                +                         Shape -- ^ __shape__: 1-D `Tensor` indicating the shape of the output.
                                +                         -> m' (Tensor Value dtype) -- ^ __output__: An empty Tensor of the specified type.
                                +_ParallelConcatStart' op'options shape | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "_ParallelConcatStart"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & opAttr "shape" .~ shape
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "output"
                                +  description: "An empty Tensor of the specified type."
                                +  type_attr: "dtype"
                                +}
                                +attr {
                                +  name: "shape"
                                +  type: "shape"
                                +  description: "1-D `Tensor` indicating the shape of the output."
                                +}
                                +attr {
                                +  name: "dtype"
                                +  type: "type"
                                +  description: "The element type of the returned tensor."
                                +}
                                +-}
                                +
                                +-- | Updates input `value` at `loc` with `update`.
                                +--
                                +-- If you use this function you will almost certainly want to add
                                +-- a control dependency as done in the implementation of parallel_stack to
                                +-- avoid race conditions.
                                +_ParallelConcatUpdate :: forall v'1 v'2 t . (TensorType t) => 
                                +                         Data.Int.Int64 -- ^ __loc__: A scalar indicating the index of the first dimension such that
                                +                                        -- value[loc, :] is updated.
                                +                         -> Tensor v'1 t -- ^ __value__: A `Tensor` object that will be updated in-place.
                                +                         -> Tensor v'2 t -- ^ __update__: A `Tensor` of rank one less than `value` if `loc` is a scalar,
                                +                                         -- otherwise of rank equal to `value` that contains the new values
                                +                                         -- for `value`.
                                +                         -> Tensor Build t -- ^ __output__: `value` that has been updated accordingly.
                                +_ParallelConcatUpdate = _ParallelConcatUpdate' id
                                +_ParallelConcatUpdate' :: forall v'1 v'2 t . (TensorType t) => OpParams ->
                                +                          Data.Int.Int64 -- ^ __loc__: A scalar indicating the index of the first dimension such that
                                +                                         -- value[loc, :] is updated.
                                +                          -> Tensor v'1 t -- ^ __value__: A `Tensor` object that will be updated in-place.
                                +                          -> Tensor v'2 t -- ^ __update__: A `Tensor` of rank one less than `value` if `loc` is a scalar,
                                +                                          -- otherwise of rank equal to `value` that contains the new values
                                +                                          -- for `value`.
                                +                          -> Tensor Build t -- ^ __output__: `value` that has been updated accordingly.
                                +_ParallelConcatUpdate' op'options loc value update | eqLengthGuard [] =
                                +    pureOp [] $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value,
                                +                                                             buildInputs update]
                                +        return (opDef "_ParallelConcatUpdate"
                                +                & opAttr "T" .~ tensorType (undefined :: t)
                                +                & opAttr "loc" .~ loc
                                +                & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "value"
                                +  description: "A `Tensor` object that will be updated in-place."
                                +  type_attr: "T"
                                +}
                                +input_arg {
                                +  name: "update"
                                +  description: "A `Tensor` of rank one less than `value` if `loc` is a scalar,\notherwise of rank equal to `value` that contains the new values\nfor `value`."
                                +  type_attr: "T"
                                +}
                                +output_arg {
                                +  name: "output"
                                +  description: "`value` that has been updated accordingly."
                                +  type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "loc"
                                +  type: "int"
                                +  description: "A scalar indicating the index of the first dimension such that\nvalue[loc, :] is updated."
                                +}
                                +-}
                                +
                                +-- | Receives the named tensor from send_device on recv_device.
                                +
                                +_Recv :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => 
                                +         Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
                                +         -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive.
                                +_Recv = _Recv' id
                                +_Recv' :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) =>
                                +          OpParams ->
                                +          Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
                                +          -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive.
                                +_Recv' op'options send_device_incarnation | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence []
                                +        buildOp [] (opDef "_Recv"
                                +                    & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type)
                                +                    & opAttr "send_device_incarnation" .~ send_device_incarnation
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +output_arg {
                                +  name: "tensor"
                                +  description: "The tensor to receive."
                                +  type_attr: "tensor_type"
                                +}
                                +attr { name: "tensor_type" type: "type" }
                                +attr {
                                +  name: "tensor_name"
                                +  type: "string"
                                +  description: "The name of the tensor to receive."
                                +}
                                +attr {
                                +  name: "send_device"
                                +  type: "string"
                                +  description: "The name of the device sending the tensor."
                                +}
                                +attr {
                                +  name: "send_device_incarnation"
                                +  type: "int"
                                +  description: "The current incarnation of send_device."
                                +}
                                +attr {
                                +  name: "recv_device"
                                +  type: "string"
                                +  description: "The name of the device receiving the tensor."
                                +}
                                +attr {
                                +  name: "client_terminated"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
                                +}
                                +-}
                                +
                                +-- | A graph node which represents a return value of a function.
                                +
                                +_Retval :: forall v'1 t m' . (MonadBuild m', TensorType t) => 
                                +           Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function.
                                +           -> Tensor v'1 t -- ^ __input__: The return value.
                                +           -> m' (ControlNode)
                                +_Retval = _Retval' id
                                +_Retval' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +            Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function.
                                +            -> Tensor v'1 t -- ^ __input__: The return value.
                                +            -> m' (ControlNode)
                                +_Retval' op'options index input | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
                                +        buildOp [] (opDef "_Retval"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "index" .~ index
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "input" description: "The return value." type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "index"
                                +  type: "int"
                                +  description: "This return value is the index-th return value of the function."
                                +  has_minimum: true
                                +}
                                +-}
                                +
                                +-- | Sends the named tensor from send_device to recv_device.
                                +
                                +_Send :: forall v'1 t m' . (MonadBuild m', TensorType t) => 
                                +         Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
                                +         -> Tensor v'1 t -- ^ __tensor__: The tensor to send.
                                +         -> m' (ControlNode)
                                +_Send = _Send' id
                                +_Send' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams ->
                                +          Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
                                +          -> Tensor v'1 t -- ^ __tensor__: The tensor to send.
                                +          -> m' (ControlNode)
                                +_Send' op'options send_device_incarnation tensor | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor]
                                +        buildOp [] (opDef "_Send"
                                +                    & opAttr "T" .~ tensorType (undefined :: t)
                                +                    & opAttr "send_device_incarnation" .~ send_device_incarnation
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "tensor" description: "The tensor to send." type_attr: "T"
                                +}
                                +attr { name: "T" type: "type" }
                                +attr {
                                +  name: "tensor_name"
                                +  type: "string"
                                +  description: "The name of the tensor to send."
                                +}
                                +attr {
                                +  name: "send_device"
                                +  type: "string"
                                +  description: "The name of the device sending the tensor."
                                +}
                                +attr {
                                +  name: "send_device_incarnation"
                                +  type: "int"
                                +  description: "The current incarnation of send_device."
                                +}
                                +attr {
                                +  name: "recv_device"
                                +  type: "string"
                                +  description: "The name of the device receiving the tensor."
                                +}
                                +attr {
                                +  name: "client_terminated"
                                +  type: "bool"
                                +  default_value { b: false }
                                +  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
                                +}
                                +-}
                                +
                                +-- | Reads the value of a variable without any memory model.
                                +--
                                +-- The tensor returned by this operation aliases a mutable Tensor, and its value
                                +-- can be observed to be different by different ops.
                                +-- 
                                +-- Internal and private to the tensorflow implementation.
                                +_UnsafeReadVariable :: forall v'1 dtype m' . (MonadBuild m',
                                +                                              TensorType dtype) => 
                                +                       Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                       -> m' (Tensor Value dtype) -- ^ __value__
                                +_UnsafeReadVariable = _UnsafeReadVariable' id
                                +_UnsafeReadVariable' :: forall v'1 dtype m' . (MonadBuild m',
                                +                                               TensorType dtype) => OpParams ->
                                +                        Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable.
                                +                        -> m' (Tensor Value dtype) -- ^ __value__
                                +_UnsafeReadVariable' op'options resource | eqLengthGuard [] =
                                +    build $ do
                                +        op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource]
                                +        buildOp [] (opDef "_UnsafeReadVariable"
                                +                    & opAttr "dtype" .~ tensorType (undefined :: dtype)
                                +                    & op'options & opInputs .~ op'inputs)
                                +{-
                                +input_arg {
                                +  name: "resource"
                                +  description: "handle to the resource in which to store the variable."
                                +  type: DT_RESOURCE
                                +}
                                +output_arg { name: "value" type_attr: "dtype" }
                                +attr {
                                +  name: "dtype" type: "type" description: "the dtype of the value."
                                +}
                                +-}
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/style.css b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt b/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt deleted file mode 100644 index 3645dcd..0000000 --- 
a/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt +++ /dev/null @@ -1,7082 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | Haskell wrappers for Core Tensorflow Ops. --- --- Code generated signatures for the Ops in libtensorflow. -@package tensorflow-core-ops -@version 0.1.0.0 - -module TensorFlow.GenOps.Core - --- | Raise a exception to abort the process when called. If --- exit_without_error is true, the process will exit normally, otherwise --- it will exit with a SIGABORT signal. --- --- Returns nothing but an exception. -abort :: (MonadBuild m') => m' (ControlNode) -abort' :: (MonadBuild m') => OpParams -> m' (ControlNode) - --- | Computes the absolute value of a tensor. --- --- Given a tensor x, this operation returns a tensor containing --- the absolute value of each element in x. For example, if x is --- an input element and y is an output element, this operation computes --- \(y = |x|\). -abs :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -abs' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Applies a gradient to a given accumulator. Does not add if local_step --- is lesser --- --- than the accumulator's global_step. -accumulatorApplyGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' (ControlNode) -accumulatorApplyGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' (ControlNode) - --- | Returns the number of gradients aggregated in the given accumulators. 
-accumulatorNumAccumulated :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) -accumulatorNumAccumulated' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) - --- | Updates the accumulator with a new value for global_step. Logs warning --- if the --- --- accumulator's value is already higher than new_global_step. -accumulatorSetGlobalStep :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 Int64 -> m' (ControlNode) -accumulatorSetGlobalStep' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> m' (ControlNode) - --- | Extracts the average gradient in the given ConditionalAccumulator, --- provided --- --- that sufficient (i.e., more than num_required) gradients have been --- accumulated. The op blocks until sufficient gradients have been --- accumulated. If the accumulator has already aggregated more than --- num_required gradients, it returns the average of the accumulated --- gradients. Also automatically increments the recorded global_step in --- the accumulator by 1, and resets the aggregate to 0. -accumulatorTakeGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype) -accumulatorTakeGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype) - --- | Computes acos of x element-wise. -acos :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -acos' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns x + y element-wise. --- ---
                                  ---
                                • NOTE*: Add supports broadcasting. AddN does not. --- More about broadcasting here
                                • ---
                                -add :: (OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -add' :: (OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Add an N-minibatch SparseTensor to a --- SparseTensorsMap, return N handles. --- --- A SparseTensor of rank R is represented by three --- tensors: sparse_indices, sparse_values, and --- sparse_shape, where --- --- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R``` --- --- An N-minibatch of SparseTensor objects is --- represented as a SparseTensor having a first --- sparse_indices column taking values between `[0, N)`, where --- the minibatch size `N == sparse_shape[0]`. --- --- The input SparseTensor must have rank R greater than --- 1, and the first dimension is treated as the minibatch dimension. --- Elements of the SparseTensor must be sorted in increasing --- order of this first dimension. The stored SparseTensor --- objects pointed to by each row of the output sparse_handles --- will have rank `R-1`. --- --- The SparseTensor values can then be read out as part of a --- minibatch by passing the given keys as vector elements to --- TakeManySparseFromTensorsMap. To ensure the correct --- SparseTensorsMap is accessed, ensure that the same --- container and shared_name are passed to that Op. If --- no shared_name is provided here, instead use the *name* of --- the Operation created by calling AddManySparseToTensorsMap as --- the shared_name passed to --- TakeManySparseFromTensorsMap. Ensure the Operations are --- colocated. 
-addManySparseToTensorsMap :: (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) -addManySparseToTensorsMap' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) - --- | Add all input tensors element wise. -addN :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => [Tensor v'1 t] -> Tensor Build t -addN' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> [Tensor v'1 t] -> Tensor Build t - --- | Add a SparseTensor to a SparseTensorsMap return its --- handle. --- --- A SparseTensor is represented by three tensors: --- sparse_indices, sparse_values, and --- sparse_shape. --- --- This operator takes the given SparseTensor and adds it to a --- container object (a SparseTensorsMap). A unique key within --- this container is generated in the form of an int64, and this --- is the value that is returned. --- --- The SparseTensor can then be read out as part of a minibatch --- by passing the key as a vector element to --- TakeManySparseFromTensorsMap. To ensure the correct --- SparseTensorsMap is accessed, ensure that the same --- container and shared_name are passed to that Op. If --- no shared_name is provided here, instead use the *name* of --- the Operation created by calling AddSparseToTensorsMap as the --- shared_name passed to TakeManySparseFromTensorsMap. --- Ensure the Operations are colocated. -addSparseToTensorsMap :: (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) -addSparseToTensorsMap' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) - --- | Deprecated. Disallowed in GraphDef version >= 2. 
-adjustContrast :: (OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float -adjustContrast' :: (OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float - --- | Adjust the contrast of one or more images. --- --- images is a tensor of at least 3 dimensions. The last 3 --- dimensions are interpreted as `[height, width, channels]`. The other --- dimensions only represent a collection of images, such as `[batch, --- height, width, channels].` --- --- Contrast is adjusted independently for each channel of each image. --- --- For each channel, the Op first computes the mean of the image pixels --- in the channel and then adjusts each component of each pixel to `(x - --- mean) * contrast_factor + mean`. -adjustContrastv2 :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float -adjustContrastv2' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float - --- | Adjust the hue of one or more images. --- --- images is a tensor of at least 3 dimensions. The last --- dimension is interpretted as channels, and must be three. --- --- The input image is considered in the RGB colorspace. Conceptually, the --- RGB colors are first mapped into HSV. A delta is then applied all the --- hue values, and then remapped back to RGB colorspace. -adjustHue :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float -adjustHue' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float - --- | Adjust the saturation of one or more images. --- --- images is a tensor of at least 3 dimensions. The last --- dimension is interpretted as channels, and must be three. --- --- The input image is considered in the RGB colorspace. Conceptually, the --- RGB colors are first mapped into HSV. 
A scale is then applied all the --- saturation values, and then remapped back to RGB colorspace. -adjustSaturation :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float -adjustSaturation' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float - --- | Computes the "logical and" of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -all :: (OneOf '[Int32, Int64] tidx) => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool -all' :: (OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool - --- | Generates labels for candidate sampling with a learned unigram --- distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -allCandidateSampler :: Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -allCandidateSampler' :: OpParams -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) - --- | Computes the "logical or" of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. 
-any :: (OneOf '[Int32, Int64] tidx) => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool -any' :: (OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool - --- | Update '*var' according to the adadelta scheme. --- --- accum = rho() * accum + (1 - rho()) * grad.square(); update = --- (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; --- update_accum = rho() * update_accum + (1 - rho()) * update.square(); --- var -= update; -applyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t) -applyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t) - --- | Update '*var' according to the adagrad scheme. --- --- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) -applyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t) -applyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t) - --- | Update '*var' according to the proximal adagrad scheme. 
-applyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t) -applyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t) - --- | Update '*var' according to the Adam algorithm. --- --- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- --- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - --- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + --- epsilon) -applyAdam :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t) -applyAdam' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t) - --- | Update '*var' according to the centered RMSProp algorithm. --- --- The centered RMSProp algorithm uses an estimate of the centered second --- moment (i.e., the variance) for normalization, as opposed to regular --- RMSProp, which uses the (uncentered) second moment. This often helps --- with training, but is slightly more expensive in terms of computation --- and memory. 
--- --- Note that in dense implementation of this algorithm, mg, ms, and mom --- will update even if the grad is zero, but in this sparse --- implementation, mg, ms, and mom will not update in iterations during --- which the grad is zero. --- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 --- mean_grad = decay * mean_grad + (1-decay) * gradient --- --- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - --- mean_grad ** 2) --- --- mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + --- (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / --- sqrt(ms - mg * mg + epsilon) var <- var - mom -applyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) -applyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) - --- | Update '*var' according to the Ftrl-proximal scheme. 
--- --- accum_new = accum + grad * grad linear += grad + --- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 --- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - --- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new -applyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) -applyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) - --- | Update '*var' by subtracting alpha * delta from it. -applyGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t) -applyGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Update '*var' according to the momentum scheme. Set use_nesterov = --- True if you --- --- want to use Nesterov momentum. 
--- --- accum = accum * momentum + grad var -= lr * accum -applyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) -applyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) - --- | Update '*var' and '*accum' according to FOBOS with Adagrad learning --- rate. --- --- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var --- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} -applyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t) -applyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t) - --- | Update '*var' as FOBOS algorithm with fixed learning rate. 
--- --- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * --- max{|prox_v|-alpha*l1,0} -applyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) -applyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) - --- | Update '*var' according to the RMSProp algorithm. --- --- Note that in dense implementation of this algorithm, ms and mom will --- update even if the grad is zero, but in this sparse implementation, ms --- and mom will not update in iterations during which the grad is zero. --- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = --- learning_rate * gradient / sqrt(mean_square + epsilon) --- --- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * --- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom -applyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) -applyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) - --- | Returns the index with the largest value across dimensions of a --- tensor. 
-argMax :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 -argMax' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 - --- | Returns the index with the smallest value across dimensions of a --- tensor. -argMin :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 -argMin' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 - --- | Converts each entry in the given tensor to strings. Supports many --- numeric --- --- types and boolean. -asString :: (OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => Tensor v'1 t -> Tensor Build ByteString -asString' :: (OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString - --- | Computes asin of x element-wise. -asin :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -asin' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Asserts that the given condition is true. --- --- If condition evaluates to false, print the list of tensors in --- `data`. summarize determines how many entries of the tensors --- to print. 
-assert :: (MonadBuild m', TensorTypes t) => Tensor v'1 Bool -> TensorList (v'2) t -> m' (ControlNode) -assert' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Bool -> TensorList (v'2) t -> m' (ControlNode) - --- | Update ref by assigning value to it. --- --- This operation outputs "ref" after the assignment is done. This makes --- it easier to chain operations that need to use the reset value. -assign :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) -assign' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) - --- | Update ref by adding value to it. --- --- This operation outputs "ref" after the update is done. This makes it --- easier to chain operations that need to use the reset value. -assignAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) -assignAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) - --- | Adds a value to the current value of a variable. --- --- Any ReadVariableOp which depends directly or indirectly on this assign --- is guaranteed to see the incremented value or a subsequent newer one. --- --- Outputs the incremented value, which can be used to totally order the --- increments to this variable. -assignAddVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) -assignAddVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) - --- | Update ref by subtracting value from it. --- --- This operation outputs "ref" after the update is done. This makes it --- easier to chain operations that need to use the reset value. 
-assignSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) -assignSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) - --- | Assigns a new value to a variable. --- --- Any ReadVariableOp with a control dependency on this op is guaranteed --- to return this value or a subsequent newer value of the variable. -assignVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) -assignVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) - --- | Computes atan of x element-wise. -atan :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -atan' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Outputs a Summary protocol buffer with audio. --- --- The summary has up to max_outputs summary values containing --- audio. The audio is built from tensor which must be 3-D with --- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, --- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` --- with a sample rate of sample_rate. --- --- The tag argument is a scalar Tensor of type --- string. It is used to build the tag of the summary --- values: --- ---
                                  ---
                                • If max_outputs is 1, the summary value tag is --- '*tag*/audio'.
                                • ---
                                • If max_outputs is greater than 1, the summary value tags --- are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', --- etc.
                                • ---
                                -audioSummary :: Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString -audioSummary' :: OpParams -> Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString - --- | Outputs a Summary protocol buffer with audio. --- --- The summary has up to max_outputs summary values containing --- audio. The audio is built from tensor which must be 3-D with --- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, --- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` --- with a sample rate of sample_rate. --- --- The tag argument is a scalar Tensor of type --- string. It is used to build the tag of the summary --- values: --- ---
                                  ---
                                • If max_outputs is 1, the summary value tag is --- '*tag*/audio'.
                                • ---
                                • If max_outputs is greater than 1, the summary value tags --- are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', --- etc.
                                • ---
                                -audioSummaryV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString -audioSummaryV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString - --- | Performs average pooling on the input. --- --- Each entry in output is the mean of the corresponding size --- ksize window in value. -avgPool :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -avgPool' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Performs 3D average pooling on the input. -avgPool3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -avgPool3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes gradients of average pooling function. -avgPool3DGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t -avgPool3DGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t - --- | Computes gradients of the average pooling function. -avgPoolGrad :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t -avgPoolGrad' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t - --- | Defines a barrier that persists across different graph executions. --- --- A barrier represents a key-value map, where each key is a string, and --- each value is a tuple of tensors. --- --- At runtime, the barrier contains complete and --- incomplete elements. 
A complete element has defined tensors --- for all components of its value tuple, and may be accessed using --- BarrierTakeMany. An incomplete element has some undefined components --- in its value tuple, and may be updated using BarrierInsertMany. -barrier :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) -barrier' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) - --- | Closes the given barrier. --- --- This operation signals that no more new elements will be inserted in --- the given barrier. Subsequent InsertMany that try to introduce a new --- key will fail. Subsequent InsertMany operations that just add missing --- components to already existing elements will continue to succeed. --- Subsequent TakeMany operations will continue to succeed if sufficient --- completed elements remain in the barrier. Subsequent TakeMany --- operations that would block will fail immediately. -barrierClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) -barrierClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) - --- | Computes the number of incomplete elements in the given barrier. -barrierIncompleteSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) -barrierIncompleteSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) - --- | For each key, assigns the respective value to the specified component. --- --- If a key is not found in the barrier, this operation will create a new --- incomplete element. If a key is found in the barrier, and the element --- already has a value at component_index, this operation will fail with --- INVALID_ARGUMENT, and leave the barrier in an undefined state. 
-barrierInsertMany :: (MonadBuild m', TensorType t) => Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' (ControlNode) -barrierInsertMany' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' (ControlNode) - --- | Computes the number of complete elements in the given barrier. -barrierReadySize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) -barrierReadySize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) - --- | Takes the given number of completed elements from a barrier. --- --- This operation concatenates completed-element component tensors along --- the 0th dimension to make a single component tensor. --- --- Elements come out of the barrier when they are complete, and in the --- order in which they were placed into the barrier. The indices output --- provides information about the batch in which each element was --- originally inserted into the barrier. 
-barrierTakeMany :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value ByteString, TensorList (Value) component_types)) -barrierTakeMany' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value ByteString, TensorList (Value) component_types)) -batchCholesky :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -batchCholesky' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -batchCholeskyGrad :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchCholeskyGrad' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchIFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchIFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchIFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchIFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchIFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -batchIFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) - --- | Multiplies slices of two tensors in batches. 
--- --- Multiplies all slices of Tensor x and y (each --- slice can be viewed as an element of a batch), and arranges the --- individual results in a single output tensor of the same batch size. --- Each of the individual slices can optionally be adjointed (to adjoint --- a matrix means to transpose and conjugate it) before multiplication by --- setting the adj_x or adj_y flag to True, --- which are by default False. --- --- The input tensors x and y are 3-D or higher with --- shape `[..., r_x, c_x]` and `[..., r_y, c_y]`. --- --- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, --- where: --- --- r_o = c_x if adj_x else r_x c_o = r_y if adj_y else c_y --- --- It is computed as: --- --- output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) -batchMatMul :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchMatMul' :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchMatrixBandPart :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t -batchMatrixBandPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t -batchMatrixDeterminant :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -batchMatrixDeterminant' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -batchMatrixDiag :: (TensorType t) => Tensor v'1 t -> Tensor Build t -batchMatrixDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -batchMatrixDiagPart :: (TensorType t) => Tensor v'1 t -> Tensor Build t -batchMatrixDiagPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -batchMatrixInverse :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -batchMatrixInverse' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t 
-batchMatrixSetDiag :: (TensorType t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchMatrixSetDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchMatrixSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchMatrixSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchMatrixSolveLs :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t -batchMatrixSolveLs' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t -batchMatrixTriangularSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -batchMatrixTriangularSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Batch normalization. --- --- This op is deprecated. Prefer `tf.nn.batch_normalization`. -batchNormWithGlobalNormalization :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t -batchNormWithGlobalNormalization' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t - --- | Gradients for batch normalization. --- --- This op is deprecated. See `tf.nn.batch_normalization`. 
-batchNormWithGlobalNormalizationGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -batchNormWithGlobalNormalizationGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -batchSelfAdjointEig :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -batchSelfAdjointEig' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -batchSelfAdjointEigV2 :: (OneOf '[Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t) -batchSelfAdjointEigV2' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t) -batchSvd :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) -batchSvd' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) - --- | BatchToSpace for 4-D tensors of type T. --- --- This is a legacy version of the more general BatchToSpaceND. --- --- Rearranges (permutes) data from batch into blocks of spatial data, --- followed by cropping. This is the reverse transformation of --- SpaceToBatch. More specifically, this op outputs a copy of the input --- tensor where values from the batch dimension are moved in --- spatial blocks to the height and width dimensions, --- followed by cropping along the height and width --- dimensions. 
-batchToSpace :: (TensorType t, OneOf '[Int32, Int64] tidx) => Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -batchToSpace' :: (TensorType t, OneOf '[Int32, Int64] tidx) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | BatchToSpace for N-D tensors of type T. --- --- This operation reshapes the "batch" dimension 0 into `M + 1` --- dimensions of shape `block_shape + [batch]`, interleaves these blocks --- back into the grid defined by the spatial dimensions `[1, ..., M]`, to --- obtain a result with the same rank as the input. The spatial --- dimensions of this intermediate result are then optionally cropped --- according to crops to produce the output. This is the reverse --- of SpaceToBatch. See below for a precise description. -batchToSpaceND :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t -batchToSpaceND' :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t - --- | Compute the regularized incomplete beta integral \(I_x(a, b)\). --- --- The regularized incomplete beta integral is defined as: --- --- ``` I_x(a, b) = frac{B(x; a, b)}{B(a, b)} ``` where --- --- ``` B(x; a, b) = int_0^x t^{a-1} (1 - t)^{b-1} dt ``` --- --- is the incomplete beta function and \(B(a, b)\) is the *complete* beta --- function. -betainc :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -betainc' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Adds bias to value. --- --- This is a special case of `tf.add` where bias is restricted --- to be 1-D. Broadcasting is supported, so value may have any --- number of dimensions. 
-biasAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -biasAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | The backward operation for BiasAdd on the "bias" tensor. --- --- It accumulates all the values from out_backprop into the feature --- dimension. For NHWC data format, the feature dimension is the last. --- For NCHW data format, the feature dimension is the third-to-last. -biasAddGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -biasAddGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Adds bias to value. --- --- This is a deprecated version of BiasAdd and will be soon removed. --- --- This is a special case of `tf.add` where bias is restricted --- to be 1-D. Broadcasting is supported, so value may have any --- number of dimensions. -biasAddV1 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -biasAddV1' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Bitcasts a tensor from one type to another without copying data. --- --- Given a tensor input, this operation returns a tensor that --- has the same buffer data as input with datatype `type`. --- --- If the input datatype T is larger than the output datatype --- `type` then the shape changes from [...] to [..., --- sizeof(T)/sizeof(`type`)]. 
--- --- If T is smaller than `type`, the operator requires that the --- rightmost dimension be equal to sizeof(`type`)/sizeof(T). The --- shape then goes from [..., sizeof(`type`)/sizeof(T)] to --- [...]. --- ---
                                  ---
                                • NOTE*: Bitcast is implemented as a low-level cast, so machines --- with different endian orderings will give different results.
                                • ---
                                -bitcast :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => Tensor v'1 t -> Tensor Build type' -bitcast' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => OpParams -> Tensor v'1 t -> Tensor Build type' - --- | Return the shape of s0 op s1 with broadcast. --- --- Given s0 and s1, tensors that represent shapes, --- compute r0, the broadcasted shape. s0, s1 --- and r0 are all integer vectors. -broadcastArgs :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -broadcastArgs' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Return the reduction indices for computing gradients of s0 op s1 with --- broadcast. --- --- This is typically used by gradient computations for a broadcasting --- operation. -broadcastGradientArgs :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) -broadcastGradientArgs' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) - --- | Performs beam search decoding on the logits given in input. --- --- A note about the attribute merge_repeated: For the beam search --- decoder, this means that if consecutive entries in a beam are the --- same, only the first of these is emitted. That is, when the top path --- is "A B B B B", "A B" is returned if merge_repeated = True but "A B B --- B B" is returned if merge_repeated = False. 
-cTCBeamSearchDecoder :: Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float) -cTCBeamSearchDecoder' :: OpParams -> Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float) - --- | Performs greedy decoding on the logits given in inputs. --- --- A note about the attribute merge_repeated: if enabled, when --- consecutive logits' maximum indices are the same, only the first of --- these is emitted. Labeling the blank *, the sequence "A B B * B --- B" becomes "A B" if merge_repeated = True and "A B B B B" if --- merge_repeated = False. --- --- Regardless of the value of merge_repeated, if the maximum index of a --- given time and batch corresponds to the blank, index `(num_classes - --- 1)`, no new element is emitted. -cTCGreedyDecoder :: Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float) -cTCGreedyDecoder' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float) - --- | Calculates the CTC Loss (log probability) for each batch entry. Also --- calculates --- --- the gradient. This class performs the softmax operation for you, so --- inputs should be e.g. linear projections of outputs by an LSTM. -cTCLoss :: Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float) -cTCLoss' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float) - --- | Cast x of type SrcT to y of DstT. 
-cast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT -cast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT - --- | Returns element-wise smallest integer in not less than x. -ceil :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -ceil' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Checks a tensor for NaN and Inf values. --- --- When run, reports an InvalidArgument error if tensor --- has any values that are not a number (NaN) or infinity (Inf). --- Otherwise, passes tensor as-is. -checkNumerics :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -checkNumerics' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the Cholesky decomposition of one or more square matrices. --- --- The input is a tensor of shape `[..., M, M]` whose inner-most 2 --- dimensions form square matrices, with the same constraints as the --- single matrix Cholesky decomposition above. The output is a tensor of --- the same shape as the input containing the Cholesky decompositions for --- all input submatrices `[..., :, :]`. -cholesky :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -cholesky' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the reverse mode backpropagated gradient of the Cholesky --- algorithm. --- --- For an explanation see "Differentiation of the Cholesky algorithm" by --- Iain Murray http://arxiv.org/abs/1602.07527. -choleskyGrad :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -choleskyGrad' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Converts two real numbers to a complex number. 
--- --- Given a tensor real representing the real part of a complex --- number, and a tensor imag representing the imaginary part of a --- complex number, this operation returns complex numbers elementwise of --- the form \(a + bj\), where *a* represents the real part and *b* --- represents the imag part. --- --- The input tensors real and imag must have the same --- shape. --- --- For example: --- --- ``` # tensor real is [2.25, 3.25] # tensor imag is --- [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + --- 5.75j]] ``` -complex :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout -complex' :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout - --- | Computes the complex absolute value of a tensor. --- --- Given a tensor x of complex numbers, this operation returns a --- tensor of type float or double that is the absolute --- value of each element in x. All elements in x must --- be complex numbers of the form \(a + bj\). The absolute value is --- computed as \( sqrt{a^2 + b^2}\). -complexAbs :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout -complexAbs' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout - --- | Computes the ids of the positions in sampled_candidates that match --- true_labels. --- --- When doing log-odds NCE, the result of this op should be passed --- through a SparseToDense op, then added to the logits of the sampled --- candidates. This has the effect of removing the sampled --- labels that match the true labels by making the classifier sure that --- they are sampled labels. 
-computeAccidentalHits :: Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float) -computeAccidentalHits' :: OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float) - --- | Concatenates tensors along one dimension. -concat :: (TensorType t) => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t -concat' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t - --- | Computes offsets of concat inputs within its output. --- --- For example: --- --- ```prettyprint # x is [2, 2, 7] # y is [2, 3, 7] # --- z is [2, 5, 7] concat_offset(2, [x, y, z]) => [0, 0, 0], --- [0, 2, 0], [0, 5, 0] ``` -concatOffset :: Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32] -concatOffset' :: OpParams -> Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32] - --- | Concatenates tensors along one dimension. -concatV2 :: (TensorType t, OneOf '[Int32, Int64] tidx) => [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t -concatV2' :: (TensorType t, OneOf '[Int32, Int64] tidx) => OpParams -> [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t - --- | A conditional accumulator for aggregating gradients. The accumulator --- accepts --- --- gradients marked with local_step greater or equal to the most recent --- global_step known to the accumulator. The average can be extracted --- from the accumulator, provided sufficient gradients have been --- accumulated. Extracting the average automatically resets the aggregate --- to 0, and increments the global_step recorded by the accumulator. -conditionalAccumulator :: (MonadBuild m') => DataType -> Shape -> m' (Tensor Ref ByteString) -conditionalAccumulator' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString) - --- | Returns the complex conjugate of a complex number. 
--- --- Given a tensor input of complex numbers, this operation --- returns a tensor of complex numbers that are the complex conjugate of --- each element in input. The complex numbers in input --- must be of the form \(a + bj\), where *a* is the real part and *b* is --- the imaginary part. --- --- The complex conjugate returned by this operation is of the form \(a - --- bj\). --- --- For example: --- --- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] --- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] ``` -conj :: (OneOf '[Complex Double, Complex Float] t) => Tensor v'1 t -> Tensor Build t -conj' :: (OneOf '[Complex Double, Complex Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns a constant tensor. -const :: (TensorType dtype) => Tensor Build dtype -const' :: (TensorType dtype) => OpParams -> Tensor Build dtype - --- | Does nothing. Serves as a control trigger for scheduling. --- --- Only useful as a placeholder for control edges. -controlTrigger :: (MonadBuild m') => m' (ControlNode) -controlTrigger' :: (MonadBuild m') => OpParams -> m' (ControlNode) - --- | Computes a 2-D convolution given 4-D input and filter --- tensors. --- --- Given an input tensor of shape `[batch, in_height, in_width, --- in_channels]` and a filter / kernel tensor of shape `[filter_height, --- filter_width, in_channels, out_channels]`, this op performs the --- following: --- ---
                                  ---
                                1. Flattens the filter to a 2-D matrix with shape `[filter_height * --- filter_width * in_channels, output_channels]`.
                                2. ---
                                3. Extracts image patches from the input tensor to form a *virtual* --- tensor of shape `[batch, out_height, out_width, filter_height * --- filter_width * in_channels]`.
                                4. ---
                                5. For each patch, right-multiplies the filter matrix and the image --- patch vector.
                                6. ---
                                --- --- In detail, with the default NHWC format, --- --- output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, --- strides[2] * j + dj, q] * filter[di, dj, q, k] --- --- Must have `strides[0] = strides[3] = 1`. For the most common case of --- the same horizontal and vertices strides, `strides = [1, stride, --- stride, 1]`. -conv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -conv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the gradients of convolution with respect to the filter. -conv2DBackpropFilter :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t -conv2DBackpropFilter' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t - --- | Computes the gradients of convolution with respect to the input. -conv2DBackpropInput :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -conv2DBackpropInput' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Computes a 3-D convolution given 5-D input and filter --- tensors. --- --- In signal processing, cross-correlation is a measure of similarity of --- two waveforms as a function of a time-lag applied to one of them. This --- is also known as a sliding dot product or sliding inner-product. --- --- Our Conv3D implements a form of cross-correlation. 
-conv3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -conv3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the gradients of 3-D convolution with respect to the filter. -conv3DBackpropFilter :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -conv3DBackpropFilter' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Computes the gradients of 3-D convolution with respect to the filter. -conv3DBackpropFilterV2 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t -conv3DBackpropFilterV2' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t - --- | Computes the gradients of 3-D convolution with respect to the input. -conv3DBackpropInput :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -conv3DBackpropInput' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Computes the gradients of 3-D convolution with respect to the input. 
-conv3DBackpropInputV2 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -conv3DBackpropInputV2' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Copy Op. --- --- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on --- the device on which the tensor is allocated. --- --- Unlike the CopyHost Op, this op does not have HostMemory constraint on --- its input or output. -copy :: (TensorType t) => Tensor v'1 t -> Tensor Build t -copy' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Copy Host Op. --- --- Performs CPU-to-CPU deep-copying of tensor. --- --- Unlike the Copy Op, this op has HostMemory constraint on its input or --- output. -copyHost :: (TensorType t) => Tensor v'1 t -> Tensor Build t -copyHost' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes cos of x element-wise. -cos :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -cos' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Increments ref until it reaches limit. -countUpTo :: (MonadBuild m', OneOf '[Int32, Int64] t) => Int64 -> Tensor Ref t -> m' (Tensor Value t) -countUpTo' :: (MonadBuild m', OneOf '[Int32, Int64] t) => OpParams -> Int64 -> Tensor Ref t -> m' (Tensor Value t) - --- | Extracts crops from the input image tensor and bilinearly resizes them --- (possibly --- --- with aspect ratio change) to a common output size specified by --- crop_size. This is more general than the --- crop_to_bounding_box op which extracts a fixed size slice --- from the input image and does not allow resizing or aspect ratio --- change. 
--- --- Returns a tensor with crops from the input image at --- positions defined at the bounding box locations in boxes. The --- cropped boxes are all resized (with bilinear interpolation) to a fixed --- `size = [crop_height, crop_width]`. The result is a 4-D tensor --- `[num_boxes, crop_height, crop_width, depth]`. -cropAndResize :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float -cropAndResize' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float - --- | Computes the gradient of the crop_and_resize op wrt the input boxes --- tensor. -cropAndResizeGradBoxes :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float -cropAndResizeGradBoxes' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float - --- | Computes the gradient of the crop_and_resize op wrt the input image --- tensor. -cropAndResizeGradImage :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t -cropAndResizeGradImage' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t - --- | Compute the pairwise cross product. --- --- a and b must be the same shape; they can either be --- simple 3-element vectors, or any shape where the innermost dimension --- is 3. In the latter case, each pair of corresponding 3-element vectors --- is cross-multiplied independently. 
-cross :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -cross' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Compute the cumulative product of the tensor x along --- axis. --- --- By default, this op performs an inclusive cumprod, which means that --- the first element of the input is identical to the first element of --- the output: ```prettyprint tf.cumprod([a, b, c]) ==> [a, a * b, a * --- b * c] ``` --- --- By setting the exclusive kwarg to True, an exclusive --- cumprod is performed instead: ```prettyprint tf.cumprod([a, b, c], --- exclusive=True) ==> [0, a, a * b] ``` --- --- By setting the reverse kwarg to True, the cumprod is --- performed in the opposite direction: ```prettyprint tf.cumprod([a, b, --- c], reverse=True) ==> [a * b * c, b * c, c] ``` This is more --- efficient than using separate `tf.reverse` ops. --- --- The reverse and exclusive kwargs can also be combined: --- ```prettyprint tf.cumprod([a, b, c], exclusive=True, reverse=True) --- ==> [b * c, c, 0] ``` -cumprod :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -cumprod' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Compute the cumulative sum of the tensor x along --- axis. 
--- --- By default, this op performs an inclusive cumsum, which means that the --- first element of the input is identical to the first element of the --- output: ```prettyprint tf.cumsum([a, b, c]) ==> [a, a + b, a + b + --- c] ``` --- --- By setting the exclusive kwarg to True, an exclusive --- cumsum is performed instead: ```prettyprint tf.cumsum([a, b, c], --- exclusive=True) ==> [0, a, a + b] ``` --- --- By setting the reverse kwarg to True, the cumsum is --- performed in the opposite direction: ```prettyprint tf.cumsum([a, b, --- c], reverse=True) ==> [a + b + c, b + c, c] ``` This is more --- efficient than using separate `tf.reverse` ops. --- --- The reverse and exclusive kwargs can also be combined: --- ```prettyprint tf.cumsum([a, b, c], exclusive=True, reverse=True) --- ==> [b + c, c, 0] ``` -cumsum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -cumsum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Debug Identity Op. --- --- Provides an identity mapping of the non-Ref type input tensor for --- debugging. -debugIdentity :: (TensorType t) => Tensor v'1 t -> Tensor Build t -debugIdentity' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Debug NaN Value Counter Op --- --- Counts number of NaNs in the input tensor, for debugging. -debugNanCount :: (TensorType t) => Tensor v'1 t -> Tensor Build Int64 -debugNanCount' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Int64 - --- | Debug Numeric Summary Op. --- --- Provide a basic summary of numeric value types, range and --- distribution. 
-debugNumericSummary :: (TensorType t) => Tensor v'1 t -> Tensor Build Double -debugNumericSummary' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Double - --- | Decode web-safe base64-encoded strings. --- --- Input may or may not have padding at the end. See EncodeBase64 for --- padding. Web-safe means that input must use - and _ instead of + and --- /. -decodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString -decodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString - --- | Convert CSV records to tensors. Each column maps to one tensor. --- --- RFC 4180 format is expected for the CSV records. --- (https:/tools.ietf.orghtml/rfc4180) Note that we allow leading --- and trailing spaces with int or float field. -decodeCSV :: (OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE) => Tensor v'1 ByteString -> TensorList (v'2) oUT_TYPE -> TensorList (Build) oUT_TYPE -decodeCSV' :: (OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE) => OpParams -> Tensor v'1 ByteString -> TensorList (v'2) oUT_TYPE -> TensorList (Build) oUT_TYPE - --- | Decode the first frame of a GIF-encoded image to a uint8 tensor. --- --- GIF with frame or transparency compression are not supported convert --- animated GIF from compressed to uncompressed by: --- --- convert $src.gif -coalesce $dst.gif -decodeGif :: Tensor v'1 ByteString -> Tensor Build Word8 -decodeGif' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8 - --- | Convert JSON-encoded Example records to binary protocol buffer --- strings. --- --- This op translates a tensor containing Example records, encoded using --- the standard JSON mapping, into a tensor containing the same --- records encoded as binary protocol buffers. The resulting tensor can --- then be fed to any of the other Example-parsing ops. 
-decodeJSONExample :: Tensor v'1 ByteString -> Tensor Build ByteString -decodeJSONExample' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString - --- | Decode a JPEG-encoded image to a uint8 tensor. --- --- The attr channels indicates the desired number of color --- channels for the decoded image. --- --- Accepted values are: --- ---
                                  ---
                                • 0: Use the number of channels in the JPEG-encoded image.
                                • ---
                                • 1: output a grayscale image.
                                • ---
                                • 3: output an RGB image.
                                • ---
                                --- --- If needed, the JPEG-encoded image is transformed to match the --- requested number of color channels. --- --- The attr ratio allows downscaling the image by an integer --- factor during decoding. Allowed values are: 1, 2, 4, and 8. This is --- much faster than downscaling the image later. -decodeJpeg :: Tensor v'1 ByteString -> Tensor Build Word8 -decodeJpeg' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8 - --- | Decode a PNG-encoded image to a uint8 or uint16 tensor. --- --- The attr channels indicates the desired number of color --- channels for the decoded image. --- --- Accepted values are: --- ---
                                  ---
                                • 0: Use the number of channels in the PNG-encoded image.
                                • ---
                                • 1: output a grayscale image.
                                • ---
                                • 3: output an RGB image.
                                • ---
                                • 4: output an RGBA image.
                                • ---
                                --- --- If needed, the PNG-encoded image is transformed to match the requested --- number of color channels. -decodePng :: (OneOf '[Word16, Word8] dtype) => Tensor v'1 ByteString -> Tensor Build dtype -decodePng' :: (OneOf '[Word16, Word8] dtype) => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype - --- | Reinterpret the bytes of a string as a vector of numbers. -decodeRaw :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => Tensor v'1 ByteString -> Tensor Build out_type -decodeRaw' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type - --- | Delete the tensor specified by its handle in the session. -deleteSessionTensor :: (MonadBuild m') => Tensor v'1 ByteString -> m' (ControlNode) -deleteSessionTensor' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> m' (ControlNode) - --- | Applies set operation along last dimension of 2 Tensor inputs. --- --- See SetOperationOp::SetOperationFromContext for values of --- set_operation. --- --- Output result is a SparseTensor represented by --- result_indices, result_values, and --- result_shape. For set1 and set2 ranked --- n, this has rank n and the same 1st `n-1` dimensions --- as set1 and set2. The nth dimension --- contains the result of set_operation applied to the --- corresponding `[0...n-1]` dimension of set. -denseToDenseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -denseToDenseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) - --- | Applies set operation along last dimension of Tensor and --- SparseTensor. --- --- See SetOperationOp::SetOperationFromContext for values of --- set_operation. 
--- --- Input set2 is a SparseTensor represented by --- set2_indices, set2_values, and set2_shape. --- For set2 ranked n, 1st `n-1` dimensions must be the --- same as set1. Dimension n contains values in a set, --- duplicates are allowed but ignored. --- --- If validate_indices is True, this op validates the --- order and range of set2 indices. --- --- Output result is a SparseTensor represented by --- result_indices, result_values, and --- result_shape. For set1 and set2 ranked --- n, this has rank n and the same 1st `n-1` dimensions --- as set1 and set2. The nth dimension --- contains the result of set_operation applied to the --- corresponding `[0...n-1]` dimension of set. -denseToSparseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -denseToSparseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) - --- | DepthToSpace for tensors of type T. --- --- Rearranges data from depth into blocks of spatial data. This is the --- reverse transformation of SpaceToDepth. More specifically, this op --- outputs a copy of the input tensor where values from the --- depth dimension are moved in spatial blocks to the --- height and width dimensions. The attr --- block_size indicates the input block size and how the data is --- moved. --- ---
                                  ---
                                • Chunks of data of size `block_size * block_size` from depth are --- rearranged into non-overlapping blocks of size `block_size x --- block_size`
                                • ---
                                • The width the output tensor is `input_depth * block_size`, whereas --- the height is `input_height * block_size`.
                                • ---
                                • The depth of the input tensor must be divisible by `block_size * --- block_size`.
                                • ---
                                --- --- That is, assuming the input is in the shape: `[batch, height, width, --- depth]`, the shape of the output will be: `[batch, height*block_size, --- width*block_size, depth/(block_size*block_size)]` --- --- This operation requires that the input tensor be of rank 4, and that --- block_size be >=1 and that `block_size * block_size` be a --- divisor of the input depth. --- --- This operation is useful for resizing the activations between --- convolutions (but keeping all data), e.g. instead of pooling. It is --- also useful for training purely convolutional models. --- --- For example, given this input of shape `[1, 1, 1, 4]`, and a block --- size of 2: --- --- ```prettyprint x = [[[[1, 2, 3, 4]]]] --- --- ``` --- --- This operation will output a tensor of shape `[1, 2, 2, 1]`: --- --- ```prettyprint [[[[1], [2]], [[3], [4]]]] ``` --- --- Here, the input has a batch of 1 and each batch element has shape `[1, --- 1, 4]`, the corresponding output will have 2x2 elements and will have --- a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output --- element shape is `[2, 2, 1]`. --- --- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, --- e.g. 
--- --- ```prettyprint x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ``` --- --- This operation, for block size of 2, will return the following tensor --- of shape `[1, 2, 2, 3]` --- --- ```prettyprint [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] --- --- ``` --- --- Similarly, for the following input of shape `[1 2 2 4]`, and a block --- size of 2: --- --- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], --- [13, 14, 15, 16]]]] ``` --- --- the operator will return the following tensor of shape `[1 4 4 1]`: --- --- ```prettyprint x = [[ [1], [2], [5], [6]], [ [3], [4], [7], [8]], [ --- [9], [10], [13], [14]], [ [11], [12], [15], [16]]] --- --- ``` -depthToSpace :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor Build t -depthToSpace' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t - --- | Computes a 2-D depthwise convolution given 4-D input and --- filter tensors. --- --- Given an input tensor of shape `[batch, in_height, in_width, --- in_channels]` and a filter / kernel tensor of shape `[filter_height, --- filter_width, in_channels, channel_multiplier]`, containing --- in_channels convolutional filters of depth 1, --- depthwise_conv2d applies a different filter to each input --- channel (expanding from 1 channel to channel_multiplier --- channels for each), then concatenates the results together. Thus, the --- output has `in_channels * channel_multiplier` channels. --- --- for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b, --- i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * --- i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] --- --- Must have `strides[0] = strides[3] = 1`. For the most common case of --- the same horizontal and vertices strides, `strides = [1, stride, --- stride, 1]`. 
-depthwiseConv2dNative :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -depthwiseConv2dNative' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the gradients of depthwise convolution with respect to the --- filter. -depthwiseConv2dNativeBackpropFilter :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t -depthwiseConv2dNativeBackpropFilter' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t - --- | Computes the gradients of depthwise convolution with respect to the --- input. -depthwiseConv2dNativeBackpropInput :: (OneOf '[Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -depthwiseConv2dNativeBackpropInput' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Dequantize the input tensor into a float Tensor. --- ---
                                  ---
                                • min_range, max_range are scalar floats that specify the --- range for the input data. The mode attribute --- controls exactly which calculations are used to convert the float --- values to their quantized equivalents.
                                • ---
                                --- --- In MIN_COMBINED mode, each value of the tensor will undergo --- the following: --- --- ``` if T == qint8, in[i] += (range(T) + 1)/ 2.0 out[i] = min_range + --- (in[i]* (max_range - min_range) / range(T)) ``` here `range(T) = --- numeric_limitsT::max() - numeric_limitsT::min()` --- ---
                                  ---
                                • MIN_COMBINED Mode Example*
                                • ---
                                --- --- If the input comes from a QuantizedRelu6, the output type is quint8 --- (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The --- min_range and max_range values are therefore 0.0 and 6.0. Dequantize --- on quint8 will take each value, cast to float, and multiply by 6 / --- 255. Note that if quantizedtype is qint8, the operation will --- additionally add each value by 128 prior to casting. --- --- If the mode is MIN_FIRST, then this approach is used: --- --- ``` number_of_steps = 1 << (# of bits in T) range_adjust = --- number_of_steps / (number_of_steps - 1) range = (range_max - --- range_min) * range_adjust range_scale = range / number_of_steps const --- double offset_input = static_castdouble(input) - --- lowest_quantized; result = range_min + ((input - --- numeric_limitsT::min()) * range_scale) ``` -dequantize :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float -dequantize' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float - --- | Deserialize and concatenate SparseTensors from a serialized --- minibatch. --- --- The input serialized_sparse must be a string matrix of shape --- `[N x 3]` where N is the minibatch size and the rows --- correspond to packed outputs of SerializeSparse. The ranks of --- the original SparseTensor objects must all match. When the --- final SparseTensor is created, it has rank one higher than --- the ranks of the incoming SparseTensor objects (they have --- been concatenated along a new row dimension). --- --- The output SparseTensor object's shape values for all --- dimensions but the first are the max across the input --- SparseTensor objects' shape values for the corresponding --- dimensions. Its first shape value is N, the minibatch size. 
--- --- The input SparseTensor objects' indices are assumed ordered --- in standard lexicographic order. If this is not the case, after this --- step run SparseReorder to restore index ordering. --- --- For example, if the serialized input is a `[2 x 3]` matrix --- representing two original SparseTensor objects: --- --- index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] --- --- and --- --- index = [ 2] [10] values = [4, 5] shape = [30] --- --- then the final deserialized SparseTensor will be: --- --- index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] --- shape = [2 50] -deserializeManySparse :: (TensorType dtype) => Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64) -deserializeManySparse' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64) - --- | Destroys the temporary variable and returns its final value. --- --- Sets output to the value of the Tensor pointed to by ref, --- then destroys the temporary variable called var_name. All --- other uses of ref *must* have executed before this op. This --- is typically achieved by chaining the ref through each assign op, or --- by using control dependencies. --- --- Outputs the final value of the tensor pointed to by ref. -destroyTemporaryVariable :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Value t) -destroyTemporaryVariable' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Value t) - --- | Returns a diagonal tensor with a given diagonal values. --- --- Given a diagonal, this operation returns a tensor with the --- diagonal and everything else padded with zeros. The diagonal --- is computed as follows: --- --- Assume diagonal has dimensions [D1,..., Dk], then the output --- is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: --- --- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 --- everywhere else. 
--- --- For example: --- --- ```prettyprint # diagonal is [1, 2, 3, 4] tf.diag(diagonal) --- ==> [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]] ``` -diag :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor Build t -diag' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns the diagonal part of the tensor. --- --- This operation returns a tensor with the diagonal part of the --- input. The diagonal part is computed as follows: --- --- Assume input has dimensions `[D1,..., Dk, D1,..., Dk]`, then --- the output is a tensor of rank k with dimensions `[D1,..., --- Dk]` where: --- --- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. --- --- For example: --- --- ```prettyprint # input is [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, --- 3, 0] [0, 0, 0, 4]] --- --- tf.diag_part(input) ==> [1, 2, 3, 4] ``` -diagPart :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor Build t -diagPart' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes Psi, the derivative of Lgamma (the log of the absolute value --- of --- --- `Gamma(x)`), element-wise. -digamma :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -digamma' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the grayscale dilation of 4-D input and 3-D --- filter tensors. --- --- The input tensor has shape `[batch, in_height, in_width, --- depth]` and the filter tensor has shape `[filter_height, --- filter_width, depth]`, i.e., each input channel is processed --- independently of the others with its own structuring function. The --- output tensor has shape `[batch, out_height, out_width, --- depth]`. The spatial dimensions of the output tensor depend on the --- padding algorithm. 
We currently only support the default --- NHWC data_format. --- --- In detail, the grayscale morphological 2-D dilation is the max-sum --- correlation (for consistency with conv2d, we use unmirrored --- filters): --- --- output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * --- dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c] --- --- Max-pooling is a special case when the filter has size equal to the --- pooling kernel size and contains all zeros. --- --- Note on duality: The dilation of input by the filter --- is equal to the negation of the erosion of `-input` by the reflected --- filter. -dilation2D :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -dilation2D' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the gradient of morphological 2-D dilation with respect to --- the filter. -dilation2DBackpropFilter :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -dilation2DBackpropFilter' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Computes the gradient of morphological 2-D dilation with respect to --- the input. -dilation2DBackpropInput :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -dilation2DBackpropInput' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Returns x / y element-wise. --- ---
                                  ---
                                • NOTE*: Div supports broadcasting. More about broadcasting --- here
                                • ---
                                -div :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -div' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Draw bounding boxes on a batch of images. --- --- Outputs a copy of images but draws on top of the pixels zero --- or more bounding boxes specified by the locations in boxes. --- The coordinates of the each bounding box in boxes are encoded --- as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are --- floats in `[0.0, 1.0]` relative to the width and height of the --- underlying image. --- --- For example, if an image is 100 x 200 pixels and the bounding box is --- `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of --- the bounding box will be `(10, 40)` to `(50, 180)`. --- --- Parts of the bounding box may fall outside the image. -drawBoundingBoxes :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t -drawBoundingBoxes' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t - --- | Partitions `data` into num_partitions tensors using indices --- from partitions. --- --- For each index tuple js of size `partitions.ndim`, the slice --- `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices --- with `partitions[js] = i` are placed in `outputs[i]` in lexicographic --- order of js, and the first dimension of `outputs[i]` is the --- number of entries in partitions equal to i. In --- detail, --- --- ```python outputs[i].shape = [sum(partitions == i)] + --- data.shape[partitions.ndim:] --- --- outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) ``` --- --- `data.shape` must start with `partitions.shape`. --- --- For example: --- --- ```python # Scalar partitions. 
partitions = 1 num_partitions = 2 data --- = [10, 20] outputs[0] = [] # Empty with shape [0, 2] outputs[1] = --- [[10, 20]] --- --- # Vector partitions. partitions = [0, 0, 1, 1, 0] num_partitions = 2 --- data = [10, 20, 30, 40, 50] outputs[0] = [10, 20, 50] outputs[1] = --- [30, 40] ``` --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/DynamicPartition.png" alt /div -dynamicPartition :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t] -dynamicPartition' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t] - --- | Interleave the values from the `data` tensors into a single tensor. --- --- Builds a merged tensor such that --- --- ```python merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] --- ``` --- --- For example, if each `indices[m]` is scalar or vector, we have --- --- ```python # Scalar indices: merged[indices[m], ...] = data[m][...] --- --- # Vector indices: merged[indices[m][i], ...] = data[m][i, ...] ``` --- --- Each `data[i].shape` must start with the corresponding --- `indices[i].shape`, and the rest of `data[i].shape` must be constant --- w.r.t. i. That is, we must have `data[i].shape = --- indices[i].shape + constant`. In terms of this constant, the --- output shape is --- --- merged.shape = [max(indices)] + constant --- --- Values are merged in order, so if an index appears in both --- `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice --- `data[n][j]` will appear in the merged result. 
--- --- For example: --- --- ```python indices[0] = 6 indices[1] = [4, 1] indices[2] = [[5, 2], [0, --- 3]] data[0] = [61, 62] data[1] = [[41, 42], [11, 12]] data[2] = [[[51, --- 52], [21, 22]], [[1, 2], [31, 32]]] merged = [[1, 2], [11, 12], [21, --- 22], [31, 32], [41, 42], [51, 52], [61, 62]] ``` --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/DynamicStitch.png" alt /div -dynamicStitch :: (TensorType t) => [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t -dynamicStitch' :: (TensorType t) => OpParams -> [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t - --- | Computes the (possibly normalized) Levenshtein Edit Distance. --- --- The inputs are variable-length sequences provided by SparseTensors --- (hypothesis_indices, hypothesis_values, hypothesis_shape) and --- (truth_indices, truth_values, truth_shape). --- --- The inputs are: -editDistance :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float -editDistance' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float - --- | Computes exponential linear: `exp(features) - 1` if < 0, --- features otherwise. --- --- See Fast and Accurate Deep Network Learning by Exponential Linear --- Units (ELUs) -elu :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -elu' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes gradients for the exponential linear (Elu) operation. 
-eluGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -eluGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Encode strings into web-safe base64 format. --- --- Refer to the following article for more information on base64 format: --- en.wikipedia.orgwikiBase64. Base64 strings may have padding --- with '=' at the end so that the encoded has length multiple of 4. See --- Padding section of the link above. --- --- Web-safe means that the encoder uses - and _ instead of + and /. -encodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString -encodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString - --- | JPEG-encode an image. --- --- image is a 3-D uint8 Tensor of shape `[height, width, --- channels]`. --- --- The attr format can be used to override the color format of --- the encoded output. Values can be: --- ---
                                  ---
                                • `''`: Use a default format based on the number of channels in the --- image.
                                • ---
                                • grayscale: Output a grayscale JPEG image. The --- channels dimension of image must be 1.
                                • ---
                                • rgb: Output an RGB JPEG image. The channels --- dimension of image must be 3.
                                • ---
                                --- --- If format is not specified or is the empty string, a default --- format is picked in function of the number of channels in --- image: --- ---
                                  ---
                                • 1: Output a grayscale image.
                                • ---
                                • 3: Output an RGB image.
                                • ---
                                -encodeJpeg :: Tensor v'1 Word8 -> Tensor Build ByteString -encodeJpeg' :: OpParams -> Tensor v'1 Word8 -> Tensor Build ByteString - --- | PNG-encode an image. --- --- image is a 3-D uint8 or uint16 Tensor of shape `[height, --- width, channels]` where channels is: --- ---
                                  ---
                                • 1: for grayscale.
                                • ---
                                • 2: for grayscale + alpha.
                                • ---
                                • 3: for RGB.
                                • ---
                                • 4: for RGBA.
                                • ---
                                --- --- The ZLIB compression level, compression, can be -1 for the --- PNG-encoder default or a value from 0 to 9. 9 is the highest --- compression level, generating the smallest output, but is slower. -encodePng :: (OneOf '[Word16, Word8] t) => Tensor v'1 t -> Tensor Build ByteString -encodePng' :: (OneOf '[Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString - --- | Creates or finds a child frame, and makes `data` available to the --- child frame. --- --- This op is used together with Exit to create loops in the --- graph. The unique frame_name is used by the Executor --- to identify frames. If is_constant is true, output --- is a constant in the child frame; otherwise it may be changed in the --- child frame. At most parallel_iterations iterations are run --- in parallel in the child frame. -enter :: (TensorType t) => Tensor v'1 t -> Tensor Build t -enter' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns the truth value of (x == y) element-wise. --- ---
                                  ---
                                • NOTE*: Equal supports broadcasting. More about --- broadcasting here
                                • ---
                                -equal :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -equal' :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool - --- | Computes the Gauss error function of x element-wise. -erf :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -erf' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the complementary error function of x element-wise. -erfc :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -erfc' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Exits the current frame to its parent frame. --- --- Exit makes its input `data` available to the parent frame. -exit :: (TensorType t) => Tensor v'1 t -> Tensor Build t -exit' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes exponential of x element-wise. \(y = e^x\). -exp :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -exp' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Inserts a dimension of 1 into a tensor's shape. --- --- Given a tensor input, this operation inserts a dimension of 1 --- at the dimension index dim of input's shape. The --- dimension index dim starts at zero; if you specify a negative --- number for dim it is counted backward from the end. --- --- This operation is useful if you want to add a batch dimension to a --- single element. 
For example, if you have a single image of shape --- `[height, width, channels]`, you can make it a batch of 1 image with --- `expand_dims(image, 0)`, which will make the shape `[1, height, width, --- channels]`. --- --- Other examples: --- --- ```prettyprint # t is a tensor of shape [2] --- shape(expand_dims(t, 0)) ==> [1, 2] shape(expand_dims(t, 1)) ==> --- [2, 1] shape(expand_dims(t, -1)) ==> [2, 1] --- --- # t2 is a tensor of shape [2, 3, 5] shape(expand_dims(t2, 0)) --- ==> [1, 2, 3, 5] shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] --- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] ``` --- --- This operation requires that: --- --- `-1-input.dims() <= dim <= input.dims()` --- --- This operation is related to `squeeze()`, which removes dimensions of --- size 1. -expandDims :: (TensorType t, OneOf '[Int32, Int64] tdim) => Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t -expandDims' :: (TensorType t, OneOf '[Int32, Int64] tdim) => OpParams -> Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t - --- | Computes exponential of x - 1 element-wise. --- --- I.e., \(y = (exp x) - 1\). -expm1 :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -expm1' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Extracts a glimpse from the input tensor. --- --- Returns a set of windows called glimpses extracted at location --- offsets from the input tensor. If the windows only partially --- overlaps the inputs, the non overlapping areas will be filled with --- random noise. --- --- The result is a 4-D tensor of shape `[batch_size, glimpse_height, --- glimpse_width, channels]`. The channels and batch dimensions are the --- same as that of the input tensor. The height and width of the output --- windows are specified in the size parameter. --- --- The argument normalized and centered controls how --- the windows are built: --- ---
                                  ---
                                • If the coordinates are normalized but not centered, 0.0 and 1.0 --- correspond to the minimum and maximum of each height and width --- dimension.
                                • ---
                                • If the coordinates are both normalized and centered, they range --- from
                                • ---
                                • 1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper --- left corner, the lower right corner is located at (1.0, 1.0) and the --- center is at (0, 0).
                                • ---
                                • If the coordinates are not normalized they are interpreted as --- numbers of pixels.
                                • ---
                                -extractGlimpse :: Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float -extractGlimpse' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float - --- | Extract patches from images and put them in the --- "depth" output dimension. -extractImagePatches :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -extractImagePatches' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Compute the 1-dimensional discrete Fourier Transform over the --- inner-most --- --- dimension of input. -fFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -fFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) - --- | Compute the 2-dimensional discrete Fourier Transform over the --- inner-most --- --- 2 dimensions of input. -fFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -fFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) - --- | Compute the 3-dimensional discrete Fourier Transform over the --- inner-most 3 --- --- dimensions of input. -fFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -fFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) - --- | A queue that produces elements in first-in first-out order. -fIFOQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) -fIFOQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) - --- | A queue that produces elements in first-in first-out order. -fIFOQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle) -fIFOQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle) - --- | Output a fact about factorials. 
-fact :: Tensor Build ByteString -fact' :: OpParams -> Tensor Build ByteString - --- | Fake-quantize the inputs tensor, type float to --- outputs tensor of same type. --- --- Attributes [min; max] define the clamping range for the --- inputs data. Op divides this range into 255 steps (total of --- 256 values), then replaces each inputs value with the closest --- of the quantized step values. --- --- Quantization is called fake since the output is still in floating --- point. -fakeQuantWithMinMaxArgs :: Tensor v'1 Float -> Tensor Build Float -fakeQuantWithMinMaxArgs' :: OpParams -> Tensor v'1 Float -> Tensor Build Float - --- | Compute gradients for a FakeQuantWithMinMaxArgs operation. -fakeQuantWithMinMaxArgsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float -fakeQuantWithMinMaxArgsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float - --- | Fake-quantize the inputs tensor of type float and shape `[b, --- h, w, d]` via --- --- global float scalars min and max to outputs --- tensor of same shape as inputs. --- ---
                                  ---
                                • min; max is the clamping range for the inputs --- data. Op divides this range into 255 steps (total of 256 values), then --- replaces each inputs value with the closest of the quantized --- step values.
                                • ---
                                --- --- This operation has a gradient and thus allows for training min --- and max values. -fakeQuantWithMinMaxVars :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float -fakeQuantWithMinMaxVars' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float - --- | Compute gradients for a FakeQuantWithMinMaxVars operation. -fakeQuantWithMinMaxVarsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -fakeQuantWithMinMaxVarsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) - --- | Fake-quantize the inputs tensor of type float and one of the --- shapes: `[d]`, --- --- `[b, d]` `[b, h, w, d]` via per-channel floats min and --- max of shape `[d]` to outputs tensor of same shape as --- inputs. --- ---
                                  ---
                                • min; max is the clamping range for the inputs data --- in the corresponding depth channel. Op divides this range into 255 --- steps (total of 256 values), then replaces each inputs value --- with the closest of the quantized step values.
                                • ---
                                --- --- This operation has a gradient and thus allows for training min --- and max values. -fakeQuantWithMinMaxVarsPerChannel :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float -fakeQuantWithMinMaxVarsPerChannel' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float - --- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. -fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -fakeQuantWithMinMaxVarsPerChannelGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) - --- | Deprecated. Do not use. -fakeQueue :: (MonadBuild m') => ResourceHandle -> m' (Tensor Ref ByteString) -fakeQueue' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Ref ByteString) - --- | Creates a tensor filled with a scalar value. --- --- This operation creates a tensor of shape dims and fills it --- with value. --- --- For example: --- --- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) --- ==> [[9, 9, 9] [9, 9, 9]] ``` -fill :: (TensorType t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t -fill' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t - --- | A Reader that outputs fixed-length records from a file. -fixedLengthRecordReader :: (MonadBuild m') => Int64 -> m' (Tensor Ref ByteString) -fixedLengthRecordReader' :: (MonadBuild m') => OpParams -> Int64 -> m' (Tensor Ref ByteString) - --- | A Reader that outputs fixed-length records from a file. 
-fixedLengthRecordReaderV2 :: (MonadBuild m') => Int64 -> m' (ResourceHandle) -fixedLengthRecordReaderV2' :: (MonadBuild m') => OpParams -> Int64 -> m' (ResourceHandle) - --- | Generates labels for candidate sampling with a learned unigram --- distribution. --- --- A unigram sampler could use a fixed unigram distribution read from a --- file or passed in as an in-memory array instead of building up the --- distribution from data on the fly. There is also an option to skew the --- distribution by applying a distortion power to the weights. --- --- The vocabulary file should be in CSV-like format, with the last field --- being the weight associated with the word. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -fixedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -fixedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) - --- | Returns element-wise largest integer not greater than x. -floor :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -floor' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns x // y element-wise. --- ---
                                  ---
                                • NOTE*: FloorDiv supports broadcasting. More about --- broadcasting here
                                • ---
                                -floorDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -floorDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Returns element-wise remainder of division. When `x < 0` xor `y --- < 0` is --- --- true, this follows Python semantics in that the result here is --- consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) --- = x`. --- ---
                                  ---
                                • NOTE*: FloorMod supports broadcasting. More about --- broadcasting here
                                • ---
                                -floorMod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -floorMod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Performs fractional average pooling on the input. --- --- Fractional average pooling is similar to Fractional max pooling in the --- pooling region generation step. The only difference is that after --- pooling regions are generated, a mean operation is performed instead --- of a max operation in each pooling region. -fractionalAvgPool :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) -fractionalAvgPool' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) - --- | Computes gradient of the FractionalAvgPool function. --- --- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for --- FractionalAvgPoolGrad, we just need to evenly back-propagate each --- element of out_backprop to those indices that form the same pooling --- cell. Therefore, we just need to know the shape of original input --- tensor, instead of the whole tensor. -fractionalAvgPoolGrad :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t -fractionalAvgPoolGrad' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t - --- | Performs fractional max pooling on the input. --- --- Fractional max pooling is slightly different than regular max pooling. --- In regular max pooling, you downsize an input set by taking the --- maximum value of smaller N x N subsections of the set (often 2x2), and --- try to reduce the set by a factor of N, where N is an integer. 
--- Fractional max pooling, as you might expect from the word --- "fractional", means that the overall reduction ratio N does not have --- to be an integer. --- --- The sizes of the pooling regions are generated randomly but are fairly --- uniform. For example, let's look at the height dimension, and the --- constraints on the list of rows that will be pool boundaries. --- --- First we define the following: --- ---
                                  ---
                                1. input_row_length : the number of rows from the input set
                                2. ---
                                3. output_row_length : which will be smaller than the input
                                4. ---
                                5. alpha = input_row_length / output_row_length : our reduction --- ratio
                                6. ---
                                7. K = floor(alpha)
                                8. ---
                                9. row_pooling_sequence : this is the result list of pool boundary --- rows
                                10. ---
                                --- --- Then, row_pooling_sequence should satisfy: --- ---
                                  ---
                                1. a[0] = 0 : the first value of the sequence is 0
                                2. ---
                                3. a[end] = input_row_length : the last value of the sequence is the --- size
                                4. ---
                                5. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 --- size
                                6. ---
                                7. length(row_pooling_sequence) = output_row_length+1
                                8. ---
                                --- --- For more details on fractional max pooling, see this paper: --- Benjamin Graham, Fractional Max-Pooling -fractionalMaxPool :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) -fractionalMaxPool' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) - --- | Computes gradient of the FractionalMaxPool function. -fractionalMaxPoolGrad :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t -fractionalMaxPoolGrad' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t - --- | Batch normalization. --- --- Note that the size of 4D Tensors are defined by either NHWC or --- NCHW. The size of 1D Tensors matches the dimension C of the 4D --- Tensors. -fusedBatchNorm :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -fusedBatchNorm' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) - --- | Gradient for batch normalization. --- --- Note that the size of 4D Tensors are defined by either NHWC or --- NCHW. The size of 1D Tensors matches the dimension C of the 4D --- Tensors. 
-fusedBatchNormGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -fusedBatchNormGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) - --- | Performs a padding as a preprocess during a convolution. --- --- Similar to FusedResizeAndPadConv2d, this op allows for an optimized --- implementation where the spatial padding transformation stage is fused --- with the im2col lookup, but in this case without the bilinear --- filtering required for resizing. Fusing the padding prevents the need --- to write out the intermediate results as whole tensors, reducing --- memory pressure, and we can get some latency gains by merging the --- transformation calculations. The data_format attribute for Conv2D --- isn't supported by this op, and NHWC order is used instead. --- Internally this op uses a single per-graph scratch buffer, which means --- that it will block if multiple versions are being run in parallel. --- This is because this operator is primarily an optimization to minimize --- memory usage. -fusedPadConv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t -fusedPadConv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t - --- | Performs a resize and padding as a preprocess during a convolution. 
--- --- It's often possible to do spatial transformations more efficiently as --- part of the packing stage of a convolution, so this op allows for an --- optimized implementation where these stages are fused together. This --- prevents the need to write out the intermediate results as whole --- tensors, reducing memory pressure, and we can get some latency gains --- by merging the transformation calculations. The data_format attribute --- for Conv2D isn't supported by this op, and defaults to NHWC --- order. Internally this op uses a single per-graph scratch buffer, --- which means that it will block if multiple versions are being run in --- parallel. This is because this operator is primarily an optimization --- to minimize memory usage. -fusedResizeAndPadConv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t -fusedResizeAndPadConv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t - --- | Gather slices from params according to indices. --- --- indices must be an integer tensor of any dimension (usually --- 0-D or 1-D). Produces an output tensor with shape `indices.shape + --- params.shape[1:]` where: --- --- ```python # Scalar indices output[:, ..., :] = params[indices, :, ... --- :] --- --- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] --- --- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, --- ..., j], :, ..., :] ``` --- --- If indices is a permutation and `len(indices) == --- params.shape[0]` then this operation will permute params --- accordingly. 
--- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/Gather.png" alt /div -gather :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams -gather' :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams - --- | Gather values or slices from params according to --- indices. --- --- params is a Tensor of rank P and indices is --- a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- params. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 --- < K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of params. --- --- Produces an output tensor with shape --- --- ``` [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]]. ``` --- --- Some examples below. 
--- --- Simple indexing into a matrix: --- --- ```python indices = [[0, 0], [1, 1]] params = [[a, --- b], [c, d]] output = [a, --- d] ``` --- --- Slice indexing into a matrix: --- --- ```python indices = [[1], [0]] params = [[a, b], --- [c, d]] output = [[c, d], --- [a, b]] ``` --- --- Indexing into a 3-tensor: --- --- ```python indices = [[1]] params = [[[a0, b0], --- [c0, d0]], [[a1, b1], --- [c1, d1]]] output = [[[a1, b1], --- [c1, d1]]] --- --- indices = [[0, 1], [1, 0]] params = [[[a0, b0], --- [c0, d0]], [[a1, b1], --- [c1, d1]]] output = [[c0, d0], --- [a1, b1]] --- --- indices = [[0, 0, 1], [1, 0, 1]] params = [[[a0, --- b0], [c0, d0]], [[a1, --- b1], [c1, d1]]] output = [b0, --- b1] ``` --- --- Batched indexing into a matrix: --- --- ```python indices = [[[0, 0]], [[0, 1]]] params = [[a, --- b], [c, d]] output = [[a], --- [b]] ``` --- --- Batched slice indexing into a matrix: --- --- ```python indices = [[[1]], [[0]]] params = [[a, b], --- [c, d]] output = [[[c, d]], --- [[a, b]]] ``` --- --- Batched indexing into a 3-tensor: --- --- ```python indices = [[[1]], [[0]]] params = [[[a0, --- b0], [c0, d0]], [[a1, --- b1], [c1, d1]]] output = [[[[a1, --- b1], [c1, d1]]], [[[a0, --- b0], [c0, d0]]]] --- --- indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params = --- [[[a0, b0], [c0, d0]], --- [[a1, b1], [c1, d1]]] output = --- [[[c0, d0], [a1, b1]], --- [[a0, b0], [c1, d1]]] --- --- indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params = --- [[[a0, b0], [c0, d0]], --- [[a1, b1], [c1, d1]]] output = --- [[b0, b1], [d0, c1]] ``` -gatherNd :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams -gatherNd' :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams - --- | Store the input tensor in the state of the current session. 
-getSessionHandle :: (TensorType t) => Tensor v'1 t -> Tensor Build ByteString -getSessionHandle' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString - --- | Get the value of the tensor specified by its handle. -getSessionTensor :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor Build dtype -getSessionTensor' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype - --- | Returns the truth value of (x > y) element-wise. --- ---
                                  ---
                                • NOTE*: Greater supports broadcasting. More about --- broadcasting here
                                • ---
                                -greater :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -greater' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool - --- | Returns the truth value of (x >= y) element-wise. --- ---
                                  ---
                                • NOTE*: GreaterEqual supports broadcasting. More about --- broadcasting here
                                • ---
                                -greaterEqual :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -greaterEqual' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool - --- | Convert one or more images from HSV to RGB. --- --- Outputs a tensor of the same shape as the images tensor, --- containing the RGB value of the pixels. The output is only well --- defined if the value in images are in `[0,1]`. --- --- See rgb_to_hsv for a description of the HSV encoding. -hSVToRGB :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -hSVToRGB' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Creates a non-initialized hash table. --- --- This op creates a hash table, specifying the type of its keys and --- values. Before using the table you will have to initialize it. After --- initialization the table will be immutable. -hashTable :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) -hashTable' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) - --- | Outputs a Summary protocol buffer with a histogram. --- --- The generated `Summary` has one summary value containing a --- histogram for values. --- --- This op reports an InvalidArgument error if any value is not --- finite. -histogramSummary :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString -histogramSummary' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString - --- | Compute the inverse 1-dimensional discrete Fourier Transform over the --- inner-most --- --- dimension of input. 
-iFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -iFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) - --- | Compute the inverse 2-dimensional discrete Fourier Transform over the --- inner-most --- --- 2 dimensions of input. -iFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -iFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) - --- | Compute the inverse 3-dimensional discrete Fourier Transform over the --- inner-most --- --- 3 dimensions of input. -iFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -iFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) - --- | Return a tensor with the same shape and contents as the input tensor --- or value. -identity :: (TensorType t) => Tensor v'1 t -> Tensor Build t -identity' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | A Reader that outputs the queued work as both the key and value. --- --- To use, enqueue strings in a Queue. ReaderRead will take the front --- work string and output (work, work). -identityReader :: (MonadBuild m') => m' (Tensor Ref ByteString) -identityReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) - --- | A Reader that outputs the queued work as both the key and value. --- --- To use, enqueue strings in a Queue. ReaderRead will take the front --- work string and output (work, work). -identityReaderV2 :: (MonadBuild m') => m' (ResourceHandle) -identityReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) - --- | Compute the lower regularized incomplete Gamma function `Q(a, x)`. --- --- The lower regularized incomplete Gamma function is defined as: --- --- ``` P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) ``` where ``` --- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt ``` is the lower --- incomplete Gamma function. --- --- Note, above `Q(a, x)` (Igammac) is the upper regularized --- complete Gamma function. 
-igamma :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -igamma' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Compute the upper regularized incomplete Gamma function `Q(a, x)`. --- --- The upper regularized incomplete Gamma function is defined as: --- --- ``` Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) ``` where ``` --- Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt ``` is the upper --- incomplete Gama function. --- --- Note, above `P(a, x)` (Igamma) is the lower regularized --- complete Gamma function. -igammac :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -igammac' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Returns the imaginary part of a complex number. --- --- Given a tensor input of complex numbers, this operation --- returns a tensor of type float that is the imaginary part of --- each element in input. All elements in input must be --- complex numbers of the form \(a + bj\), where *a* is the real part and --- *b* is the imaginary part returned by this operation. --- --- For example: --- --- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] --- tf.imag(input) ==> [4.75, 5.75] ``` -imag :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout -imag' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout - --- | Outputs a Summary protocol buffer with images. --- --- The summary has up to max_images summary values containing --- images. The images are built from tensor which must be 4-D --- with shape `[batch_size, height, width, channels]` and where --- channels can be: --- ---
                                  ---
                                • 1: tensor is interpreted as Grayscale.
                                • ---
                                • 3: tensor is interpreted as RGB.
                                • ---
                                • 4: tensor is interpreted as RGBA.
                                • ---
                                --- --- The images have the same number of channels as the input tensor. For --- float input, the values are normalized one image at a time to fit in --- the range `[0, 255]`. uint8 values are unchanged. The op uses --- two different normalization algorithms: --- ---
                                  ---
                                • If the input values are all positive, they are rescaled so the --- largest one is 255.
                                • ---
                                • If any input value is negative, the values are shifted so input --- value 0.0 is at 127. They are then rescaled so that either the --- smallest value is 0, or the largest one is 255.
                                • ---
                                --- --- The tag argument is a scalar Tensor of type --- string. It is used to build the tag of the summary --- values: --- ---
                                  ---
                                • If max_images is 1, the summary value tag is --- '*tag*/image'.
                                • ---
                                • If max_images is greater than 1, the summary value tags --- are generated sequentially as '*tag*/image/0', '*tag*/image/1', --- etc.
                                • ---
                                --- --- The bad_color argument is the color to use in the generated --- images for non-finite input values. It is a unit8 1-D tensor --- of length channels. Each element must be in the range `[0, --- 255]` (It represents the value of a pixel in the output image). --- Non-finite values in the input tensor are replaced by this tensor in --- the output image. The default value is the color red. -imageSummary :: (OneOf '[Word16, Word8, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString -imageSummary' :: (OneOf '[Word16, Word8, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString - --- | Returns immutable tensor from memory region. --- --- The current implementation memmaps the tensor from a file. -immutableConst :: (TensorType dtype) => Shape -> Tensor Build dtype -immutableConst' :: (TensorType dtype) => OpParams -> Shape -> Tensor Build dtype - --- | Says whether the targets are in the top K predictions. --- --- This outputs a batch_size bool array, an entry `out[i]` is --- true if the prediction for the target class is among the top --- k predictions among all predictions for example i. --- Note that the behavior of InTopK differs from the --- TopK op in its handling of ties; if multiple classes have the --- same prediction value and straddle the top-k boundary, all of --- those classes are considered to be in the top k. 
--- --- More formally, let --- --- \(predictions_i\) be the predictions for all classes for example --- i, \(targets_i\) be the target class for example i, --- \(out_i\) be the output for example i, --- --- $$out_i = predictions_{i, targets_i} in --- TopKIncludingTies(predictions_i)$$ -inTopK :: (OneOf '[Int32, Int64] t) => Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool -inTopK' :: (OneOf '[Int32, Int64] t) => OpParams -> Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool - --- | Table initializer that takes two tensors for keys and values --- respectively. -initializeTable :: (MonadBuild m', TensorType tkey, TensorType tval) => Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' (ControlNode) -initializeTable' :: (MonadBuild m', TensorType tkey, TensorType tval) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' (ControlNode) - --- | Initializes a table from a text file. --- --- It inserts one key-value pair into the table for each line of the --- file. The key and value is extracted from the whole line content, --- elements from the split line based on delimiter or the line --- number (starting from zero). Where to extract the key and value from a --- line is specified by key_index and value_index. --- ---
                                  ---
                                • A value of -1 means use the line number(starting from zero), --- expects int64.
                                • ---
                                • A value of -2 means use the whole line content, expects --- string.
                                • ---
                                • A value >= 0 means use the index (starting at zero) of the --- split line based on delimiter.
                                • ---
                                -initializeTableFromTextFile :: (MonadBuild m') => Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) -initializeTableFromTextFile' :: (MonadBuild m') => OpParams -> Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) - --- | Computes the reciprocal of x element-wise. --- --- I.e., \(y = 1 / x\). -inv :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -inv' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the gradient for the inverse of x wrt its input. --- --- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is --- the corresponding input gradient. -invGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -invGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the inverse permutation of a tensor. --- --- This operation computes the inverse of an index permutation. It takes --- a 1-D integer tensor x, which represents the indices of a --- zero-based array, and swaps each value with its index position. In --- other words, for an output tensor y and an input tensor --- x, this operation computes the following: --- --- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` --- --- The values must include 0. There can be no duplicate values or --- negative values. --- --- For example: --- --- ```prettyprint # tensor x is [3, 4, 0, 2, 1] --- invert_permutation(x) ==> [2, 4, 3, 0, 1] ``` -invertPermutation :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor Build t -invertPermutation' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns which elements of x are finite. 
--- --- compatibility(numpy) Equivalent to np.isfinite --- end_compatibility -isFinite :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool -isFinite' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool - --- | Returns which elements of x are Inf. --- --- compatibility(numpy) Equivalent to np.isinf end_compatibility -isInf :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool -isInf' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool - --- | Returns which elements of x are NaN. --- --- compatibility(numpy) Equivalent to np.isnan end_compatibility -isNan :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool -isNan' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool - --- | Checks whether a tensor has been initialized. --- --- Outputs boolean scalar indicating whether the tensor has been --- initialized. -isVariableInitialized :: (MonadBuild m', TensorType dtype) => Tensor Ref dtype -> m' (Tensor Value Bool) -isVariableInitialized' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref dtype -> m' (Tensor Value Bool) - --- | L2 Loss. --- --- Computes half the L2 norm of a tensor without the sqrt: --- --- output = sum(t ** 2) / 2 -l2Loss :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -l2Loss' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Local Response Normalization. --- --- The 4-D input tensor is treated as a 3-D array of 1-D vectors --- (along the last dimension), and each vector is normalized --- independently. Within a given vector, each component is divided by the --- weighted, squared sum of inputs within depth_radius. 
In --- detail, --- --- sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + --- depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) ** --- beta --- --- For details, see Krizhevsky et al., ImageNet classification with --- deep convolutional neural networks (NIPS 2012). -lRN :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor Build t -lRN' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Gradients for Local Response Normalization. -lRNGrad :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -lRNGrad' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Generates labels for candidate sampling with a learned unigram --- distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -learnedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -learnedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) - --- | Returns the truth value of (x < y) element-wise. --- ---
                                  ---
                                • NOTE*: Less supports broadcasting. More about --- broadcasting here
                                • ---
                                -less :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -less' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool - --- | Returns the truth value of (x <= y) element-wise. --- ---
                                  ---
                                • NOTE*: LessEqual supports broadcasting. More about --- broadcasting here
                                • ---
                                -lessEqual :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -lessEqual' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool - --- | Computes the log of the absolute value of `Gamma(x)` element-wise. -lgamma :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -lgamma' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Generates values in an interval. --- --- A sequence of num evenly-spaced values are generated --- beginning at start. If `num > 1`, the values in the --- sequence increase by `stop - start / num - 1`, so that the last one is --- exactly stop. --- --- For example: --- --- ``` tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 --- 12.0] ``` -linSpace :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t -linSpace' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t - --- | Computes the difference between two lists of numbers or strings. --- --- Given a list x and a list y, this operation returns --- a list out that represents all values that are in x --- but not in y. The returned list out is sorted in the --- same order that the numbers appear in x (duplicates are --- preserved). This operation also returns a list idx that --- represents the position of each out element in x. 
In --- other words: --- --- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` --- --- For example, given this input: --- --- ```prettyprint x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ``` --- --- This operation would return: --- --- ```prettyprint out ==> [2, 4, 6] idx ==> [1, 3, 5] ``` -listDiff :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx) -listDiff' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx) - --- | Computes natural logarithm of x element-wise. --- --- I.e., \(y = log_e x\). -log :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -log' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes natural logarithm of (1 + x) element-wise. --- --- I.e., \(y = log_e (1 + x)\). -log1p :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -log1p' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes log softmax activations. --- --- For each batch i and class j we have --- --- logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) -logSoftmax :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -logSoftmax' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Generates labels for candidate sampling with a log-uniform --- distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. 
The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -logUniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -logUniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) - --- | Returns the truth value of x AND y element-wise. --- ---
                                  ---
                                • NOTE*: LogicalAnd supports broadcasting. More about --- broadcasting here
                                • ---
                                -logicalAnd :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool -logicalAnd' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool - --- | Returns the truth value of NOT x element-wise. -logicalNot :: Tensor v'1 Bool -> Tensor Build Bool -logicalNot' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool - --- | Returns the truth value of x OR y element-wise. --- ---
                                  ---
                                • NOTE*: LogicalOr supports broadcasting. More about --- broadcasting here
                                • ---
                                -logicalOr :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool -logicalOr' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool - --- | Outputs all keys and values in the table. -lookupTableExport :: (MonadBuild m', TensorType tkeys, TensorType tvalues) => Tensor Ref ByteString -> m' ((Tensor Value tkeys, Tensor Value tvalues)) -lookupTableExport' :: (MonadBuild m', TensorType tkeys, TensorType tvalues) => OpParams -> Tensor Ref ByteString -> m' ((Tensor Value tkeys, Tensor Value tvalues)) - --- | Looks up keys in a table, outputs the corresponding values. --- --- The tensor keys must of the same type as the keys of the --- table. The output values is of the type of the table values. --- --- The scalar default_value is the value output for keys not --- present in the table. It must also be of the same type as the table --- values. -lookupTableFind :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout) -lookupTableFind' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout) - --- | Replaces the contents of the table with the specified keys and values. --- --- The tensor keys must be of the same type as the keys of the --- table. The tensor values must be of the type of the table --- values. -lookupTableImport :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) -lookupTableImport' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) - --- | Updates the table to associates keys with values. --- --- The tensor keys must be of the same type as the keys of the --- table. The tensor values must be of the type of the table --- values. 
-lookupTableInsert :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) -lookupTableInsert' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) - --- | Computes the number of elements in the given table. -lookupTableSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64) -lookupTableSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64) - --- | Forwards the input to the output. --- --- This operator represents the loop termination condition used by the --- "pivot" switches of a loop. -loopCond :: Tensor v'1 Bool -> Tensor Build Bool -loopCond' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool - --- | Multiply the matrix "a" by the matrix "b". --- --- The inputs must be two-dimensional matrices and the inner dimension of --- "a" (after being transposed if transpose_a is true) must match the --- outer dimension of "b" (after being transposed if transposed_b is --- true). --- ---
                                  ---
                                • Note*: The default kernel implementation for MatMul on GPUs uses --- cublas.
                                • ---
                                -matMul :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -matMul' :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Returns the set of files matching a pattern. --- --- Note that this routine only supports wildcard characters in the --- basename portion of the pattern, not in the directory portion. -matchingFiles :: Tensor v'1 ByteString -> Tensor Build ByteString -matchingFiles' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString - --- | Copy a tensor setting everything outside a central band in each --- innermost matrix --- --- to zero. --- --- The band part is computed as follows: Assume input --- has k dimensions `[I, J, K, ..., M, N]`, then the output is a --- tensor with the same shape where --- --- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, --- n]`. --- --- The indicator function --- --- `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) --- && (num_upper < 0 || (n-m) <= num_upper)`. --- --- For example: --- --- ```prettyprint # if input is [[ 0, 1, 2, 3] [-1, 0, 1, 2] --- [-2, -1, 0, 1] [-3, -2, -1, 0]], --- --- tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] [-1, 0, 1, 2] --- [ 0, -1, 0, 1] [ 0, 0, -1, 0]], --- --- tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] [-1, 0, 1, 0] --- [-2, -1, 0, 1] [ 0, -2, -1, 0]] ``` --- --- Useful special cases: --- --- ```prettyprint tf.matrix_band_part(input, 0, -1) ==> Upper --- triangular part. tf.matrix_band_part(input, -1, 0) ==> Lower --- triangular part. tf.matrix_band_part(input, 0, 0) ==> Diagonal. 
``` -matrixBandPart :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t -matrixBandPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t - --- | Computes the determinant of one ore more square matrices. --- --- The input is a tensor of shape `[..., M, M]` whose inner-most 2 --- dimensions form square matrices. The output is a tensor containing the --- determinants for all input submatrices `[..., :, :]`. -matrixDeterminant :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -matrixDeterminant' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns a batched diagonal tensor with a given batched diagonal --- values. --- --- Given a diagonal, this operation returns a tensor with the --- diagonal and everything else padded with zeros. The diagonal --- is computed as follows: --- --- Assume diagonal has k dimensions `[I, J, K, ..., --- N]`, then the output is a tensor of rank `k+1` with dimensions [I, J, --- K, ..., N, N]` where: --- --- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. --- --- For example: --- --- ```prettyprint # diagonal is [[1, 2, 3, 4], [5, 6, 7, 8]] --- --- and diagonal.shape = (2, 4) --- --- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, --- 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, --- 8]]] --- --- which has shape (2, 4, 4) ``` -matrixDiag :: (TensorType t) => Tensor v'1 t -> Tensor Build t -matrixDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns the batched diagonal part of a batched tensor. --- --- This operation returns a tensor with the diagonal part of the --- batched input. 
The diagonal part is computed as --- follows: --- --- Assume input has k dimensions `[I, J, K, ..., M, --- N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, --- J, K, ..., min(M, N)]` where: --- --- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. --- --- The input must be at least a matrix. --- --- For example: --- --- ```prettyprint # input is [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, --- 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, --- 8]]] --- --- and input.shape = (2, 4, 4) --- --- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] --- --- which has shape (2, 4) ``` -matrixDiagPart :: (TensorType t) => Tensor v'1 t -> Tensor Build t -matrixDiagPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the inverse of one or more square invertible matrices or --- their --- --- adjoints (conjugate transposes). --- --- The input is a tensor of shape `[..., M, M]` whose inner-most 2 --- dimensions form square matrices. The output is a tensor of the same --- shape as the input containing the inverse for all input submatrices --- `[..., :, :]`. --- --- The op uses LU decomposition with partial pivoting to compute the --- inverses. --- --- If a matrix is not invertible there is no guarantee what the op does. --- It may detect the condition and raise an exception or it may simply --- return a garbage result. -matrixInverse :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -matrixInverse' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns a batched matrix tensor with new batched diagonal values. --- --- Given input and diagonal, this operation returns a --- tensor with the same shape and values as input, except for --- the main diagonal of the innermost matrices. These will be overwritten --- by the values in diagonal. 
--- --- The output is computed as follows: --- --- Assume input has `k+1` dimensions `[I, J, K, ..., M, N]` and --- diagonal has k dimensions `[I, J, K, ..., min(M, --- N)]`. Then the output is a tensor of rank `k+1` with dimensions `[I, --- J, K, ..., M, N]` where: --- ---
                                  ---
                                • `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == --- n`.
                                • ---
                                • `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != --- n`.
                                • ---
                                -matrixSetDiag :: (TensorType t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -matrixSetDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Solves systems of linear equations. --- --- Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 --- dimensions form square matrices. Rhs is a tensor of shape --- `[..., M, K]`. The output is a tensor shape `[..., M, K]`. If --- adjoint is False then each output matrix satisfies --- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If --- adjoint is True then each output matrix satisfies --- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. -matrixSolve :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -matrixSolve' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Solves one or more linear least-squares problems. --- --- matrix is a tensor of shape `[..., M, N]` whose inner-most 2 --- dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape --- `[..., M, K]`. The output is a tensor shape `[..., N, K]` where each --- output matrix solves each of the equations matrix[..., :, :] * --- output[..., :, :] = rhs[..., :, :] in the least squares sense. --- --- matrix and right-hand sides in the batch: --- --- matrix=\(A in Re^{m times n}\), rhs=\(B in Re^{m --- times k}\), output=\(X in Re^{n times k}\), --- l2_regularizer=\(lambda\). --- --- If fast is True, then the solution is computed by --- solving the normal equations using Cholesky decomposition. --- Specifically, if \(m ge n\) then \(X = (A^T A + lambda I)^{-1} A^T --- B\), which solves the least-squares problem \(X = mathrm{argmin}_{Z in --- Re^{n times k} } ||A Z - B||_F^2 + lambda ||Z||_F^2\). 
If \(m lt n\) --- then output is computed as \(X = A^T (A A^T + lambda I)^{-1} --- B\), which (for \(lambda = 0\)) is the minimum-norm solution to the --- under-determined linear system, i.e. \(X = mathrm{argmin}_{Z in Re^{n --- times k} } ||Z||_F^2 \), subject to \(A Z = B\). Notice that the fast --- path is only numerically stable when \(A\) is numerically full rank --- and has a condition number \(mathrm{cond}(A) lt --- frac{1}{sqrt{epsilon_{mach} } }\) or\(lambda\) is sufficiently large. --- --- If fast is False an algorithm based on the numerically --- robust complete orthogonal decomposition is used. This computes the --- minimum-norm least-squares solution, even when \(A\) is rank --- deficient. This path is typically 6-7 times slower than the fast path. --- If fast is False then l2_regularizer is --- ignored. -matrixSolveLs :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t -matrixSolveLs' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t - --- | Solves systems of linear equations with upper or lower triangular --- matrices by --- --- backsubstitution. --- --- matrix is a tensor of shape `[..., M, M]` whose inner-most 2 --- dimensions form square matrices. If lower is True then --- the strictly upper triangular part of each inner-most matrix is --- assumed to be zero and not accessed. If lower is False then --- the strictly lower triangular part of each inner-most matrix is --- assumed to be zero and not accessed. rhs is a tensor of shape --- `[..., M, K]`. --- --- The output is a tensor of shape `[..., M, K]`. If adjoint is --- True then the innermost matrices in output` satisfy matrix --- equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If --- adjoint is False then the strictly then the innermost --- matrices in output satisfy matrix equations --- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. 
-matrixTriangularSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -matrixTriangularSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the maximum of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -max :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -max' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Performs max pooling on the input. -maxPool :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor Build t -maxPool' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Performs 3D max pooling on the input. -maxPool3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -maxPool3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes gradients of max pooling function. 
-maxPool3DGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t -maxPool3DGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t - --- | Computes gradients of the maxpooling function. -maxPoolGrad :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -maxPoolGrad' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Computes gradients of the maxpooling function. -maxPoolGradWithArgmax :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t -maxPoolGradWithArgmax' :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t - --- | Performs max pooling on the input and outputs both max values and --- indices. --- --- The indices in argmax are flattened, so that a maximum value --- at position `[b, y, x, c]` becomes flattened index `((b * height + y) --- * width + x) * channels + c`. -maxPoolWithArgmax :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build targmax) -maxPoolWithArgmax' :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build targmax) - --- | Returns the max of x and y (i.e. x > y ? x : y) element-wise. --- ---
                                  ---
                                • NOTE*: Maximum supports broadcasting. More about --- broadcasting here
                                • ---
                                -maximum :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -maximum' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the mean of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -mean :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -mean' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Forwards the value of an available tensor from inputs to --- output. --- --- Merge waits for at least one of the tensors in --- inputs to become available. It is usually combined with --- Switch to implement branching. --- --- Merge forwards the first tensor for become available to --- output, and sets value_index to its index in --- inputs. -merge :: (TensorType t) => [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32) -merge' :: (TensorType t) => OpParams -> [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32) - --- | Merges summaries. --- --- This op creates a `Summary` protocol buffer that contains the --- union of all the values in the input summaries. --- --- When the Op is run, it reports an InvalidArgument error if --- multiple values in the summaries to merge use the same tag. 
-mergeSummary :: [Tensor v'1 ByteString] -> Tensor Build ByteString -mergeSummary' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString - --- | V2 format specific: merges the metadata files of sharded checkpoints. --- The --- --- result is one logical checkpoint, with one physical metadata file and --- renamed data files. --- --- Intended for "grouping" multiple checkpoints in a sharded checkpoint --- setup. --- --- If delete_old_dirs is true, attempts to delete recursively the dirname --- of each path in the input checkpoint_prefixes. This is useful when --- those paths are non user-facing temporary locations. -mergeV2Checkpoints :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) -mergeV2Checkpoints' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) - --- | Computes the minimum of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -min :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -min' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Returns the min of x and y (i.e. x < y ? x : y) element-wise. --- ---
                                  ---
                                • NOTE*: Minimum supports broadcasting. More about --- broadcasting here
                                • ---
                                -minimum :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -minimum' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Pads a tensor with mirrored values. --- --- This operation pads a input with mirrored values according to --- the paddings you specify. paddings is an integer --- tensor with shape `[n, 2]`, where n is the rank of input. For --- each dimension D of input, `paddings[D, 0]` indicates how --- many values to add before the contents of input in that --- dimension, and `paddings[D, 1]` indicates how many values to add after --- the contents of input in that dimension. Both `paddings[D, --- 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` --- (or `input.dim_size(D) - 1`) if copy_border is true (if --- false, respectively). --- --- The padded size of each dimension D of the output is: --- --- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` --- --- For example: --- --- ```prettyprint # t is [[1, 2, 3], [4, 5, 6]]. # --- paddings is [[1, 1]], [2, 2]]. # mode is SYMMETRIC. --- # rank of t is 2. pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, --- 2] [2, 1, 1, 2, 3, 3, 2] [5, 4, 4, 5, 6, 6, 5] [5, 4, 4, 5, 6, 6, 5]] --- ``` -mirrorPad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t -mirrorPad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t - --- | Gradient op for MirrorPad op. This op folds a mirror-padded --- tensor. --- --- This operation folds the padded areas of input by --- MirrorPad according to the paddings you specify. --- paddings must be the same as paddings argument given --- to the corresponding MirrorPad op. 
--- --- The folded size of each dimension D of the output is: --- --- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` --- --- For example: --- --- ```prettyprint # t is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. # --- paddings is [[0, 1]], [0, 1]]. # mode is SYMMETRIC. --- # rank of t is 2. pad(t, paddings) ==> [[ 1, 5] [11, 28]] --- ``` -mirrorPadGrad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t -mirrorPadGrad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t - --- | Returns element-wise remainder of division. --- ---
                                  ---
                                • NOTE*: Mod supports broadcasting. More about broadcasting --- here
                                • ---
                                -mod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -mod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Returns x * y element-wise. --- ---
                                  ---
                                • NOTE*: Mul supports broadcasting. More about broadcasting --- here
                                • ---
                                -mul :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -mul' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Draws samples from a multinomial distribution. -multinomial :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64) -multinomial' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64) - --- | Creates an empty hash table that uses tensors as the backing store. It --- uses --- --- "open addressing" with quadratic reprobing to resolve collisions. --- --- This op creates a mutable hash table, specifying the type of its keys --- and values. Each value must be a scalar. Data can be inserted into the --- table using the insert operations. It does not support the --- initialization operation. -mutableDenseHashTable :: (MonadBuild m', TensorType key_dtype) => DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString) -mutableDenseHashTable' :: (MonadBuild m', TensorType key_dtype) => OpParams -> DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString) - --- | Creates an empty hash table. --- --- This op creates a mutable hash table, specifying the type of its keys --- and values. Each value must be a scalar. Data can be inserted into the --- table using the insert operations. It does not support the --- initialization operation. -mutableHashTable :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) -mutableHashTable' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) - --- | Creates an empty hash table. 
--- --- This op creates a mutable hash table, specifying the type of its keys --- and values. Each value must be a vector. Data can be inserted into the --- table using the insert operations. It does not support the --- initialization operation. -mutableHashTableOfTensors :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) -mutableHashTableOfTensors' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) - --- | Computes numerical negative value element-wise. --- --- I.e., \(y = -x\). -neg :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -neg' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Training via negative sampling. -negTrain :: (MonadBuild m') => Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' (ControlNode) -negTrain' :: (MonadBuild m') => OpParams -> Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' (ControlNode) - --- | Makes its input available to the next iteration. -nextIteration :: (TensorType t) => Tensor v'1 t -> Tensor Build t -nextIteration' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Does nothing. Only useful as a placeholder for control edges. -noOp :: (MonadBuild m') => m' (ControlNode) -noOp' :: (MonadBuild m') => OpParams -> m' (ControlNode) - --- | Greedily selects a subset of bounding boxes in descending order of --- score, --- --- pruning away boxes that have high intersection-over-union (IOU) --- overlap with previously selected boxes. Bounding boxes are supplied as --- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of --- any diagonal pair of box corners and the coordinates can be provided --- as normalized (i.e., lying in the interval [0, 1]) or absolute. 
Note --- that this algorithm is agnostic to where the origin is in the --- coordinate system. Note that this algorithm is invariant to orthogonal --- transformations and translations of the coordinate system; thus --- translating or reflections of the coordinate system result in the same --- boxes being selected by the algorithm. --- --- The output of this operation is a set of integers indexing into the --- input collection of bounding boxes representing the selected boxes. --- The bounding box coordinates corresponding to the selected indices can --- then be obtained using the `tf.gather operation`. For example: --- --- selected_indices = tf.image.non_max_suppression( boxes, scores, --- max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, --- selected_indices) -nonMaxSuppression :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32 -nonMaxSuppression' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32 - --- | Returns the truth value of (x != y) element-wise. --- ---
                                  ---
                                • NOTE*: NotEqual supports broadcasting. More about --- broadcasting here
                                • ---
                                -notEqual :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -notEqual' :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool - --- | Returns a one-hot tensor. --- --- The locations represented by indices in indices take value --- on_value, while all other locations take value --- off_value. --- --- If the input indices is rank N, the output will have --- rank `N+1`, The new axis is created at dimension axis --- (default: the new axis is appended at the end). --- --- If indices is a scalar the output shape will be a vector of --- length depth. --- --- If indices is a vector of length features, the --- output shape will be: ``` features x depth if axis == -1 depth x --- features if axis == 0 ``` --- --- If indices is a matrix (batch) with shape `[batch, --- features]`, the output shape will be: ``` batch x features x depth if --- axis == -1 batch x depth x features if axis == 1 depth x batch x --- features if axis == 0 ``` --- --- Examples ========= --- --- Suppose that --- --- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 --- axis = -1 ``` --- --- Then output is `[4 x 3]`: --- --- ```output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) --- [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) ``` --- --- Suppose that --- --- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 0.0 off_value = 3.0 --- axis = 0 ``` --- --- Then output is `[3 x 4]`: --- --- ```output = [0.0 3.0 3.0 3.0] [3.0 3.0 3.0 0.0] [3.0 3.0 3.0 3.0] [3.0 --- 0.0 3.0 3.0] // ^ one_hot(0) // ^ one_hot(2) // ^ one_hot(-1) // ^ --- one_hot(1) ``` Suppose that --- --- ``` indices = [[0, 2], [1, -1]] depth = 3 on_value = 1.0 off_value = --- 0.0 axis = -1 ``` --- --- Then output is `[2 x 
2 x 3]`: --- --- ```output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // --- one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // --- one_hot(-1) ]``` -oneHot :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t -oneHot' :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t - --- | Packs a list of N rank-R tensors into one --- rank-`(R+1)` tensor. --- --- Packs the N tensors in values into a tensor with --- rank one higher than each tensor in values, by packing them --- along the axis dimension. Given a list of tensors of shape --- `(A, B, C)`; --- --- if `axis == 0` then the output tensor will have the shape --- `(N, A, B, C)`. if `axis == 1` then the output tensor will --- have the shape `(A, N, B, C)`. Etc. --- --- For example: --- --- ```prettyprint # x is [1, 4] # y is [2, 5] # --- z is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # --- Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, --- 6]] ``` --- --- This is the opposite of unpack. -pack :: (TensorType t) => [Tensor v'1 t] -> Tensor Build t -pack' :: (TensorType t) => OpParams -> [Tensor v'1 t] -> Tensor Build t - --- | Pads a tensor with zeros. --- --- This operation pads a input with zeros according to the --- paddings you specify. paddings is an integer tensor --- with shape `[Dn, 2]`, where n is the rank of input. For each --- dimension D of input, `paddings[D, 0]` indicates how many --- zeros to add before the contents of input in that dimension, --- and `paddings[D, 1]` indicates how many zeros to add after the --- contents of input in that dimension. 
--- --- The padded size of each dimension D of the output is: --- --- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` --- --- For example: --- --- ```prettyprint # t is [[1, 1], [2, 2]] # paddings is --- [[1, 1], [2, 2]] # rank of t is 2 pad(t, paddings) ==> --- [[0, 0, 0, 0, 0, 0] [0, 0, 1, 1, 0, 0] [0, 0, 2, 2, 0, 0] [0, 0, 0, 0, --- 0, 0]] ``` -pad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t -pad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t - --- | A queue that produces elements in first-in first-out order. --- --- Variable-size shapes are allowed by setting the corresponding shape --- dimensions to 0 in the shape attr. In this case DequeueMany will pad --- up to the maximum size of any given element in the minibatch. See --- below for details. -paddingFIFOQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) -paddingFIFOQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) - --- | A queue that produces elements in first-in first-out order. --- --- Variable-size shapes are allowed by setting the corresponding shape --- dimensions to 0 in the shape attr. In this case DequeueMany will pad --- up to the maximum size of any given element in the minibatch. See --- below for details. -paddingFIFOQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle) -paddingFIFOQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle) - --- | Concatenates a list of N tensors along the first dimension. --- --- The input tensors are all required to have size 1 in the first --- dimension. --- --- For example: --- --- ```prettyprint # x is [[1, 4]] # y is [[2, 5]] # --- z is [[3, 6]] parallel_concat([x, y, z]) => [[1, 4], [2, --- 5], [3, 6]] # Pack along first dim. 
``` --- --- The difference between concat and parallel_concat is that concat --- requires all of the inputs be computed before the operation will begin --- but doesn't require that the input shapes be known during graph --- construction. Parallel concat will copy pieces of the input into the --- output as they become available, in some situations this can provide a --- performance benefit. -parallelConcat :: (TensorType t) => Shape -> [Tensor v'1 t] -> Tensor Build t -parallelConcat' :: (TensorType t) => OpParams -> Shape -> [Tensor v'1 t] -> Tensor Build t - --- | Outputs random values from a normal distribution. The parameters may --- each be a --- --- scalar which applies to the entire output, or a vector of length --- shape[0] which stores the parameters for each batch. -parameterizedTruncatedNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype) -parameterizedTruncatedNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype) - --- | Transforms a vector of brain.Example protos (as strings) into typed --- tensors. 
-parseExample :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList (v'5) tdense -> ([Tensor Build Int64], TensorList (Build) sparse_types, [Tensor Build Int64], TensorList (Build) tdense) -parseExample' :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList (v'5) tdense -> ([Tensor Build Int64], TensorList (Build) sparse_types, [Tensor Build Int64], TensorList (Build) tdense) - --- | Transforms a scalar brain.SequenceExample proto (as strings) into --- typed tensors. -parseSingleSequenceExample :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList (v'7) tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList (Build) context_sparse_types, [Tensor Build Int64], TensorList (Build) tcontext_dense, [Tensor Build Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Int64], TensorList (Build) feature_list_dense_types) -parseSingleSequenceExample' :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList (v'7) 
tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList (Build) context_sparse_types, [Tensor Build Int64], TensorList (Build) tcontext_dense, [Tensor Build Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Int64], TensorList (Build) feature_list_dense_types) - --- | Transforms a serialized tensorflow.TensorProto proto into a Tensor. -parseTensor :: (TensorType out_type) => Tensor v'1 ByteString -> Tensor Build out_type -parseTensor' :: (TensorType out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type - --- | A placeholder op for a value that will be fed into the computation. --- --- N.B. This operation will fail with an error if it is executed. It is --- intended as a way to represent a value that will always be fed, and to --- provide attrs that enable the fed value to be checked at runtime. -placeholder :: (TensorType dtype) => Tensor Build dtype -placeholder' :: (TensorType dtype) => OpParams -> Tensor Build dtype - --- | A placeholder op for a value that will be fed into the computation. --- --- N.B. This operation will fail with an error if it is executed. It is --- intended as a way to represent a value that will always be fed, and to --- provide attrs that enable the fed value to be checked at runtime. -placeholderV2 :: (TensorType dtype) => Shape -> Tensor Build dtype -placeholderV2' :: (TensorType dtype) => OpParams -> Shape -> Tensor Build dtype - --- | A placeholder op that passes through input when its output is --- not fed. -placeholderWithDefault :: (TensorType dtype) => Shape -> Tensor v'1 dtype -> Tensor Build dtype -placeholderWithDefault' :: (TensorType dtype) => OpParams -> Shape -> Tensor v'1 dtype -> Tensor Build dtype - --- | Compute the polygamma function \(psi^{(n)}(x)\). --- --- The polygamma function is defined as: --- --- ``` psi^{(n)}(x) = frac{d^n}{dx^n} psi(x) ``` where \(psi(x)\) is the --- digamma function. 
-polygamma :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -polygamma' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the power of one value to another. --- --- Given a tensor x and a tensor y, this operation --- computes \(x^y\) for corresponding elements in x and --- y. For example: --- --- ``` # tensor x is [[2, 2]], [3, 3]] # tensor y is --- [[8, 16], [2, 3]] tf.pow(x, y) ==> [[256, 65536], [9, 27]] ``` -pow :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -pow' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | An identity op that triggers an error if a gradient is requested. --- --- When executed in a graph, this op outputs its input tensor as-is. --- --- When building ops to compute gradients, the TensorFlow gradient system --- will return an error when trying to lookup the gradient of this op, --- because no gradient must ever be registered for this function. This op --- exists to prevent subtle bugs from silently returning unimplemented --- gradients in some corner cases. -preventGradient :: (TensorType t) => Tensor v'1 t -> Tensor Build t -preventGradient' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Prints a list of tensors. --- --- Passes input through to output and prints `data` --- when evaluating. -print :: (MonadBuild m', TensorType t, TensorTypes u) => Tensor v'1 t -> TensorList (v'2) u -> m' (Tensor Value t) -print' :: (MonadBuild m', TensorType t, TensorTypes u) => OpParams -> Tensor v'1 t -> TensorList (v'2) u -> m' (Tensor Value t) - --- | A queue that produces elements sorted by the first component value. 
--- --- Note that the PriorityQueue requires the first component of any --- element to be a scalar int64, in addition to the other elements --- declared by component_types. Therefore calls to Enqueue and --- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will --- all require (resp. output) one extra entry in their input (resp. --- output) lists. -priorityQueue :: (MonadBuild m') => m' (Tensor Ref ByteString) -priorityQueue' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) - --- | A queue that produces elements sorted by the first component value. --- --- Note that the PriorityQueue requires the first component of any --- element to be a scalar int64, in addition to the other elements --- declared by component_types. Therefore calls to Enqueue and --- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will --- all require (resp. output) one extra entry in their input (resp. --- output) lists. -priorityQueueV2 :: (MonadBuild m') => m' (ResourceHandle) -priorityQueueV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) - --- | Computes the product of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -prod :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -prod' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Computes the QR decompositions of one or more matrices. 
--- --- Computes the QR decomposition of each inner matrix in tensor --- such that `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` --- --- ```prettyprint # a is a tensor. # q is a tensor of orthonormal --- matrices. # r is a tensor of upper triangular matrices. q, r = qr(a) --- q_full, r_full = qr(a, full_matrices=True) ``` -qr :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t) -qr' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t) - --- | Quantizes then dequantizes a tensor. --- --- This op simulates the precision loss from the quantized forward pass --- by: 1. Quantizing the tensor to fixed point numbers, which should --- match the target quantization method when it is used in inference. 2. --- Dequantizing it back to floating point numbers for the following ops, --- most likely matmul. --- --- There are different ways to quantize. This version does not use the --- full range of the output type, choosing to elide the lowest possible --- value for symmetry (e.g., output range is -127 to 127, not -128 to 127 --- for signed 8 bit quantization), so that 0.0 maps to 0. --- --- To perform this op, we first find the range of values in our tensor. --- The range we use is always centered on 0, so we find m such that --- ---
                                  ---
                                1. m = max(abs(input_min), abs(input_max)) if range_given is --- true,
                                2. ---
                                3. m = max(max(abs(min_elem(input)), abs(max_elem(input))) --- otherwise.
                                4. ---
                                --- --- Our input tensor range is then [-m, m]. --- --- Next, we choose our fixed-point quantization buckets, [min_fixed, --- max_fixed]. If signed_input is true, this is --- ---
                                  ---
                                • min_fixed, max_fixed =
                                • ---
                                • -(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - --- 1 .
                                • ---
                                --- --- Otherwise, if signed_input is false, the fixed-point range is --- ---
                                  ---
                                • min_fixed, max_fixed = [0, (1 << num_bits) - 1].
                                • ---
                                --- --- From this we compute our scaling factor, s: --- --- s = (max_fixed - min_fixed) / (2 * m). --- --- Now we can quantize and dequantize the elements of our tensor. An --- element e is transformed into e': --- --- e' = (e * s).round_to_nearest() / s. --- --- Note that we have a different number of buckets in the signed vs. --- unsigned cases. For example, if num_bits == 8, we get 254 buckets in --- the signed case vs. 255 in the unsigned case. --- --- For example, suppose num_bits = 8 and m = 1. Then --- ---
                                  ---
                                • min_fixed, max_fixed = [-127, 127], and s = (127 + 127) / 2 --- = 127.
                                • ---
                                --- --- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, --- 0, 38}, and dequantized to {-1, -63.0127, 0, 38.0127}. -quantizeAndDequantize :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -quantizeAndDequantize' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Convert the quantized input tensor into a lower-precision --- output, using the --- --- actual distribution of the values to maximize the usage of the lower --- bit depth and adjusting the output min and max ranges accordingly. --- ---
                                  ---
                                • input_min, input_max are scalar floats that specify the --- range for the float interpretation of the input data. For --- example, if input_min is -1.0f and input_max is 1.0f, and we are --- dealing with quint16 quantized data, then a 0 value in the 16-bit data --- should be interpreted as -1.0f, and a 65535 means 1.0f.
                                • ---
                                --- --- This operator tries to squeeze as much precision as possible into an --- output with a lower bit depth by calculating the actual min and max --- values found in the data. For example, maybe that quint16 input has no --- values lower than 16,384 and none higher than 49,152. That means only --- half the range is actually needed, all the float interpretations are --- between -0.5f and 0.5f, so if we want to compress the data into a --- quint8 output, we can use that range rather than the theoretical -1.0f --- to 1.0f that is suggested by the input min and max. --- --- In practice, this is most useful for taking output from operations --- like QuantizedMatMul that can produce higher bit-depth outputs than --- their inputs and may have large potential output ranges, but in --- practice have a distribution of input values that only uses a small --- fraction of the possible range. By feeding that output into this --- operator, we can reduce it from 32 bits down to 8 with minimal loss of --- accuracy. -quantizeDownAndShrinkRange :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -quantizeDownAndShrinkRange' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) - --- | Quantize the input tensor of type float to output --- tensor of type T. --- ---
                                  ---
                                • min_range, max_range are scalar floats that specify the --- range for the input data. The mode attribute --- controls exactly which calculations are used to convert the float --- values to their quantized equivalents.
                                • ---
                                --- --- In MIN_COMBINED mode, each value of the tensor will undergo --- the following: --- --- ``` out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) --- if T == qint8, out[i] -= (range(T) + 1) / 2.0 ``` here `range(T) = --- numeric_limitsT::max() - numeric_limitsT::min()` --- ---
                                  ---
                                • MIN_COMBINED Mode Example*
                                • ---
                                --- --- Assume the input is type float and has a possible range of [0.0, 6.0] --- and the output type is quint8 ([0, 255]). The min_range and max_range --- values should be specified as 0.0 and 6.0. Quantizing from float to --- quint8 will multiply each value of the input by 255/6 and cast to --- quint8. --- --- If the output type was qint8 ([-128, 127]), the operation will --- additionally subtract each value by 128 prior to casting, so that the --- range of values aligns with the range of qint8. --- --- If the mode is MIN_FIRST, then this approach is used: --- --- ``` number_of_steps = 1 << (# of bits in T) range_adjust = --- number_of_steps / (number_of_steps - 1) range = (range_max - --- range_min) * range_adjust range_scale = number_of_steps / range --- quantized = round(input * range_scale) - round(range_min * --- range_scale) + numeric_limitsT::min() quantized = --- max(quantized, numeric_limitsT::min()) quantized = --- min(quantized, numeric_limitsT::max()) ``` --- --- The biggest difference between this and MIN_COMBINED is that the --- minimum range is rounded first, before it's subtracted from the --- rounded value. With MIN_COMBINED, a small bias is introduced where --- repeated iterations of quantizing and dequantizing will introduce a --- larger and larger error. --- --- One thing to watch out for is that the operator may choose to adjust --- the requested minimum and maximum values slightly during the --- quantization process, so you should always use the output ports as the --- range for further calculations. For example, if the requested minimum --- and maximum values are close to equal, they will be separated by a --- small epsilon value to prevent ill-formed quantized buffers from being --- created. Otherwise, you can end up with buffers where all the --- quantized values map to the same float value, which causes problems --- for operations that have to perform further calculations on them. 
-quantizeV2 :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -quantizeV2' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) - --- | Produces the average pool of the input tensor for quantized types. -quantizedAvgPool :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -quantizedAvgPool' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) - --- | Quantized Batch normalization. --- --- This op is deprecated and will be removed in the future. Prefer --- `tf.nn.batch_normalization`. -quantizedBatchNormWithGlobalNormalization :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -quantizedBatchNormWithGlobalNormalization' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, 
Tensor Build Float, Tensor Build Float) - --- | Adds Tensor bias to Tensor input for Quantized --- types. --- --- Broadcasts the values of bias on dimensions 0..N-2 of input. -quantizedBiasAdd :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -quantizedBiasAdd' :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) - --- | Concatenates quantized tensors along one dimension. -quantizedConcat :: (TensorType t) => Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -quantizedConcat' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float) - --- | Computes a 2D convolution given quantized 4D input and filter tensors. --- --- The inputs are quantized tensors where the lowest value represents the --- real number of the associated minimum, and the highest represents the --- maximum. This means that you can only interpret the quantized output --- in the same way, by taking the returned minimum and maximum values --- into account. 
-quantizedConv2D :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -quantizedConv2D' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) - --- | Quantized Instance normalization. -quantizedInstanceNorm :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -quantizedInstanceNorm' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) - --- | Perform a quantized matrix multiplication of a by the matrix --- b. --- --- The inputs must be two-dimensional matrices and the inner dimension of --- a (after being transposed if transpose_a is --- non-zero) must match the outer dimension of b (after being --- transposed if transposed_b is non-zero). 
-quantizedMatMul :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -quantizedMatMul' :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) - --- | Produces the max pool of the input tensor for quantized types. -quantizedMaxPool :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -quantizedMaxPool' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) - --- | Computes Quantized Rectified Linear: `max(features, 0)` -quantizedRelu :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -quantizedRelu' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) - --- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` -quantizedRelu6 :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -quantizedRelu6' :: 
(OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) - --- | Computes Quantized Rectified Linear X: `min(max(features, 0), --- max_value)` -quantizedReluX :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -quantizedReluX' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) - --- | Reshapes a quantized tensor as per the Reshape op. --- --- ``` -quantizedReshape :: (TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -quantizedReshape' :: (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) - --- | Closes the given queue. --- --- This operation signals that no more elements will be enqueued in the --- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent --- Dequeue(Many) operations will continue to succeed if sufficient --- elements remain in the queue. Subsequent Dequeue(Many) operations that --- would block will fail immediately. -queueClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) -queueClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) - --- | Closes the given queue. --- --- This operation signals that no more elements will be enqueued in the --- given queue. 
Subsequent Enqueue(Many) operations will fail. Subsequent --- Dequeue(Many) operations will continue to succeed if sufficient --- elements remain in the queue. Subsequent Dequeue(Many) operations that --- would block will fail immediately. -queueCloseV2 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode) -queueCloseV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode) - --- | Dequeues a tuple of one or more tensors from the given queue. --- --- This operation has k outputs, where k is the number of components in --- the tuples stored in the given queue, and output i is the ith --- component of the dequeued tuple. --- --- N.B. If the queue is empty, this operation will block until an element --- has been dequeued (or timeout_ms elapses, if specified). -queueDequeue :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> m' (TensorList (Value) component_types) -queueDequeue' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> m' (TensorList (Value) component_types) - --- | Dequeues n tuples of one or more tensors from the given queue. --- --- If the queue is closed and there are fewer than n elements, then an --- OutOfRange error is returned. --- --- This operation concatenates queue-element component tensors along the --- 0th dimension to make a single component tensor. All of the components --- in the dequeued tuple will have size n in the 0th dimension. --- --- This operation has k outputs, where k is the number of components in --- the tuples stored in the given queue, and output i is the ith --- component of the dequeued tuple. --- --- N.B. If the queue is empty, this operation will block until n elements --- have been dequeued (or timeout_ms elapses, if specified). 
-queueDequeueMany :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) -queueDequeueMany' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) - --- | Dequeues n tuples of one or more tensors from the given queue. --- --- If the queue is closed and there are fewer than n elements, then an --- OutOfRange error is returned. --- --- This operation concatenates queue-element component tensors along the --- 0th dimension to make a single component tensor. All of the components --- in the dequeued tuple will have size n in the 0th dimension. --- --- This operation has k outputs, where k is the number of components in --- the tuples stored in the given queue, and output i is the ith --- component of the dequeued tuple. --- --- N.B. If the queue is empty, this operation will block until n elements --- have been dequeued (or timeout_ms elapses, if specified). -queueDequeueManyV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) -queueDequeueManyV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) - --- | Dequeues n tuples of one or more tensors from the given queue. --- --- This operation is not supported by all queues. If a queue does not --- support DequeueUpTo, then an Unimplemented error is returned. --- --- If the queue is closed and there are more than 0 but less than n --- elements remaining, then instead of returning an OutOfRange error like --- QueueDequeueMany, less than n elements are returned --- immediately. If the queue is closed and there are 0 elements left in --- the queue, then an OutOfRange error is returned just like in --- QueueDequeueMany. 
Otherwise the behavior is identical to --- QueueDequeueMany: --- --- This operation concatenates queue-element component tensors along the --- 0th dimension to make a single component tensor. All of the components --- in the dequeued tuple will have size n in the 0th dimension. --- --- This operation has k outputs, where k is the number of components in --- the tuples stored in the given queue, and output i is the ith --- component of the dequeued tuple. -queueDequeueUpTo :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) -queueDequeueUpTo' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) - --- | Dequeues n tuples of one or more tensors from the given queue. --- --- This operation is not supported by all queues. If a queue does not --- support DequeueUpTo, then an Unimplemented error is returned. --- --- If the queue is closed and there are more than 0 but less than n --- elements remaining, then instead of returning an OutOfRange error like --- QueueDequeueMany, less than n elements are returned --- immediately. If the queue is closed and there are 0 elements left in --- the queue, then an OutOfRange error is returned just like in --- QueueDequeueMany. Otherwise the behavior is identical to --- QueueDequeueMany: --- --- This operation concatenates queue-element component tensors along the --- 0th dimension to make a single component tensor. All of the components --- in the dequeued tuple will have size n in the 0th dimension. --- --- This operation has k outputs, where k is the number of components in --- the tuples stored in the given queue, and output i is the ith --- component of the dequeued tuple. 
-queueDequeueUpToV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) -queueDequeueUpToV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) - --- | Dequeues a tuple of one or more tensors from the given queue. --- --- This operation has k outputs, where k is the number of components in --- the tuples stored in the given queue, and output i is the ith --- component of the dequeued tuple. --- --- N.B. If the queue is empty, this operation will block until an element --- has been dequeued (or timeout_ms elapses, if specified). -queueDequeueV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> m' (TensorList (Value) component_types) -queueDequeueV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> m' (TensorList (Value) component_types) - --- | Enqueues a tuple of one or more tensors in the given queue. --- --- The components input has k elements, which correspond to the --- components of tuples stored in the given queue. --- --- N.B. If the queue is full, this operation will block until the given --- element has been enqueued (or timeout_ms elapses, if --- specified). -queueEnqueue :: (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode) -queueEnqueue' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode) - --- | Enqueues zero or more tuples of one or more tensors in the given --- queue. --- --- This operation slices each component tensor along the 0th dimension to --- make multiple queue elements. All of the tuple components must have --- the same size in the 0th dimension. --- --- The components input has k elements, which correspond to the --- components of tuples stored in the given queue. --- --- N.B. 
If the queue is full, this operation will block until the given --- elements have been enqueued (or timeout_ms elapses, if --- specified). -queueEnqueueMany :: (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode) -queueEnqueueMany' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode) - --- | Enqueues zero or more tuples of one or more tensors in the given --- queue. --- --- This operation slices each component tensor along the 0th dimension to --- make multiple queue elements. All of the tuple components must have --- the same size in the 0th dimension. --- --- The components input has k elements, which correspond to the --- components of tuples stored in the given queue. --- --- N.B. If the queue is full, this operation will block until the given --- elements have been enqueued (or timeout_ms elapses, if --- specified). -queueEnqueueManyV2 :: (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode) -queueEnqueueManyV2' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode) - --- | Enqueues a tuple of one or more tensors in the given queue. --- --- The components input has k elements, which correspond to the --- components of tuples stored in the given queue. --- --- N.B. If the queue is full, this operation will block until the given --- element has been enqueued (or timeout_ms elapses, if --- specified). -queueEnqueueV2 :: (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode) -queueEnqueueV2' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode) - --- | Computes the number of elements in the given queue. 
-queueSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) -queueSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) - --- | Computes the number of elements in the given queue. -queueSizeV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int32) -queueSizeV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int32) - --- | Converts one or more images from RGB to HSV. --- --- Outputs a tensor of the same shape as the images tensor, --- containing the HSV value of the pixels. The output is only well --- defined if the value in images are in `[0,1]`. --- --- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, --- and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A --- hue of 0 corresponds to pure red, hue 13 is pure green, and 23 --- is pure blue. -rGBToHSV :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -rGBToHSV' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Randomly crop image. --- --- size is a 1-D int64 tensor with 2 elements representing the --- crop height and width. The values must be non negative. --- --- This Op picks a random location in image and crops a --- height by width rectangle from that location. The --- random location is picked so the cropped area will fit inside the --- original image. -randomCrop :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t) -randomCrop' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t) - --- | Outputs random values from the Gamma distribution(s) described by --- alpha. --- --- This op uses the algorithm by Marsaglia et al. to acquire samples via --- transformation-rejection from pairs of uniform and normal random --- variables. 
See http://dl.acm.org/citation.cfm?id=358414 -randomGamma :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) => Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t) -randomGamma' :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t) - --- | Randomly shuffles a tensor along its first dimension. --- --- The tensor is shuffled along dimension 0, such that each `value[j]` is --- mapped to one and only one `output[i]`. For example, a mapping that --- might occur for a 3x2 tensor is: --- --- ```prettyprint [[1, 2], [[5, 6], [3, 4], ==> [1, 2], [5, 6]] [3, --- 4]] ``` -randomShuffle :: (MonadBuild m', TensorType t) => Tensor v'1 t -> m' (Tensor Value t) -randomShuffle' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -> m' (Tensor Value t) - --- | A queue that randomizes the order of elements. -randomShuffleQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) -randomShuffleQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) - --- | A queue that randomizes the order of elements. -randomShuffleQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle) -randomShuffleQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle) - --- | Outputs random values from a normal distribution. --- --- The generated values will have mean 0 and standard deviation 1. -randomStandardNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype) -randomStandardNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype) - --- | Outputs random values from a uniform distribution. --- --- The generated values follow a uniform distribution in the range `[0, --- 1)`. The lower bound 0 is included in the range, while the upper bound --- 1 is excluded. 
-randomUniform :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype) -randomUniform' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype) - --- | Outputs random integers from a uniform distribution. --- --- The generated values are uniform integers in the range `[minval, --- maxval)`. The lower bound minval is included in the range, --- while the upper bound maxval is excluded. --- --- The random integers are slightly biased unless `maxval - minval` is an --- exact power of two. The bias is small for values of `maxval - minval` --- significantly smaller than the range of the output (either `2^32` or --- `2^64`). -randomUniformInt :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout) -randomUniformInt' :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout) - --- | Creates a sequence of numbers. --- --- This operation creates a sequence of numbers that begins at --- start and extends by increments of delta up to but --- not including limit. --- --- For example: --- --- ``` # start is 3 # limit is 18 # delta is 3 --- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ``` -range :: (OneOf '[Int32, Int64, Double, Float] tidx) => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx -range' :: (OneOf '[Int32, Int64, Double, Float] tidx) => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx - --- | Returns the rank of a tensor. --- --- This operation returns an integer representing the rank of --- input. 
--- --- For example: --- --- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], --- [4, 4, 4]]] # shape of tensor t is [2, 2, 3] rank(t) ==> 3 --- ``` --- ---
                                  ---
                                • *Note**: The rank of a tensor is not the same as the rank of a --- matrix. The rank of a tensor is the number of indices required to --- uniquely select each element of the tensor. Rank is also known as --- "order", "degree", or "ndims."
                                • ---
                                -rank :: (TensorType t) => Tensor v'1 t -> Tensor Build Int32 -rank' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Int32 - --- | Reads and outputs the entire contents of the input filename. -readFile :: Tensor v'1 ByteString -> Tensor Build ByteString -readFile' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString - --- | Reads the value of a variable. --- --- The tensor returned by this operation is immutable. --- --- The value returned by this operation is guaranteed to be influenced by --- all the writes on which this operation depends directly or indirectly, --- and to not be influenced by any of the writes which depend directly or --- indirectly on this operation. -readVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> m' (Tensor Value dtype) -readVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> m' (Tensor Value dtype) - --- | Returns the number of records this Reader has produced. --- --- This is the same as the number of ReaderRead executions that have --- succeeded. -readerNumRecordsProduced :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64) -readerNumRecordsProduced' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64) - --- | Returns the number of records this Reader has produced. --- --- This is the same as the number of ReaderRead executions that have --- succeeded. -readerNumRecordsProducedV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int64) -readerNumRecordsProducedV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int64) - --- | Returns the number of work units this Reader has finished processing. 
-readerNumWorkUnitsCompleted :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64) -readerNumWorkUnitsCompleted' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64) - --- | Returns the number of work units this Reader has finished processing. -readerNumWorkUnitsCompletedV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int64) -readerNumWorkUnitsCompletedV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int64) - --- | Returns the next record (key, value pair) produced by a Reader. --- --- Will dequeue from the input queue if necessary (e.g. when the Reader --- needs to start reading from a new file since it has finished with the --- previous file). -readerRead :: (MonadBuild m') => Tensor Ref ByteString -> Tensor Ref ByteString -> m' ((Tensor Value ByteString, Tensor Value ByteString)) -readerRead' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> m' ((Tensor Value ByteString, Tensor Value ByteString)) - --- | Returns up to num_records (key, value) pairs produced by a --- Reader. --- --- Will dequeue from the input queue if necessary (e.g. when the Reader --- needs to start reading from a new file since it has finished with the --- previous file). It may return less than num_records even --- before the last batch. -readerReadUpTo :: (MonadBuild m') => Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) -readerReadUpTo' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) - --- | Returns up to num_records (key, value) pairs produced by a --- Reader. --- --- Will dequeue from the input queue if necessary (e.g. when the Reader --- needs to start reading from a new file since it has finished with the --- previous file). 
It may return less than num_records even --- before the last batch. -readerReadUpToV2 :: (MonadBuild m') => ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) -readerReadUpToV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) - --- | Returns the next record (key, value pair) produced by a Reader. --- --- Will dequeue from the input queue if necessary (e.g. when the Reader --- needs to start reading from a new file since it has finished with the --- previous file). -readerReadV2 :: (MonadBuild m') => ResourceHandle -> ResourceHandle -> m' ((Tensor Value ByteString, Tensor Value ByteString)) -readerReadV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> ResourceHandle -> m' ((Tensor Value ByteString, Tensor Value ByteString)) - --- | Restore a Reader to its initial clean state. -readerReset :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) -readerReset' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) - --- | Restore a Reader to its initial clean state. -readerResetV2 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode) -readerResetV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode) - --- | Restore a reader to a previously saved state. --- --- Not all Readers support being restored, so this can produce an --- Unimplemented error. -readerRestoreState :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) -readerRestoreState' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) - --- | Restore a reader to a previously saved state. --- --- Not all Readers support being restored, so this can produce an --- Unimplemented error. 
-readerRestoreStateV2 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 ByteString -> m' (ControlNode) -readerRestoreStateV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 ByteString -> m' (ControlNode) - --- | Produce a string tensor that encodes the state of a Reader. --- --- Not all Readers support being serialized, so this can produce an --- Unimplemented error. -readerSerializeState :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value ByteString) -readerSerializeState' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value ByteString) - --- | Produce a string tensor that encodes the state of a Reader. --- --- Not all Readers support being serialized, so this can produce an --- Unimplemented error. -readerSerializeStateV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value ByteString) -readerSerializeStateV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value ByteString) - --- | Returns the real part of a complex number. --- --- Given a tensor input of complex numbers, this operation --- returns a tensor of type float that is the real part of each --- element in input. All elements in input must be --- complex numbers of the form \(a + bj\), where *a* is the real part --- returned by this operation and *b* is the imaginary part. --- --- For example: --- --- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] --- tf.real(input) ==> [-2.25, 3.25] ``` -real :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout -real' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout - --- | Returns x / y element-wise for real types. --- --- If x and y are reals, this will return the --- floating-point division. --- ---
                                  ---
                                • NOTE*: Div supports broadcasting. More about broadcasting --- here
                                • ---
                                -realDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -realDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the reciprocal of x element-wise. --- --- I.e., \(y = 1 / x\). -reciprocal :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -reciprocal' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the gradient for the inverse of x wrt its input. --- --- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is --- the corresponding input gradient. -reciprocalGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -reciprocalGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Emits randomized records. -recordInput :: (MonadBuild m') => m' (Tensor Value ByteString) -recordInput' :: (MonadBuild m') => OpParams -> m' (Tensor Value ByteString) - --- | Joins a string Tensor across the given dimensions. --- --- Computes the string join across dimensions in the given string Tensor --- of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by --- joining the input strings with the given separator (default: empty --- string). Negative indices are counted backwards from the end, with --- `-1` being equivalent to `n - 1`. 
--- --- For example: --- --- ``` # tensor a is [["a", "b"], ["c", "d"]] tf.reduce_join(a, --- 0) ==> ["ac", "bd"] tf.reduce_join(a, 1) ==> ["ab", "cd"] --- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] --- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] --- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] --- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] --- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] --- tf.reduce_join(a, [0, 1]) ==> ["acbd"] tf.reduce_join(a, [1, 0]) --- ==> ["abcd"] tf.reduce_join(a, []) ==> ["abcd"] ``` -reduceJoin :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString -reduceJoin' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString - --- | Creates or finds a child frame, and makes `data` available to the --- child frame. --- --- The unique frame_name is used by the Executor to --- identify frames. If is_constant is true, output is a --- constant in the child frame; otherwise it may be changed in the child --- frame. At most parallel_iterations iterations are run in --- parallel in the child frame. -refEnter :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t) -refEnter' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t) - --- | Exits the current frame to its parent frame. --- --- Exit makes its input `data` available to the parent frame. -refExit :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t) -refExit' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t) - --- | Return the same ref tensor as the input ref tensor. -refIdentity :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t) -refIdentity' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t) - --- | Forwards the value of an available tensor from inputs to --- output. 
--- --- Merge waits for at least one of the tensors in --- inputs to become available. It is usually combined with --- Switch to implement branching. --- --- Merge forwards the first tensor for become available to --- output, and sets value_index to its index in --- inputs. -refMerge :: (MonadBuild m', TensorType t) => [Tensor Ref t] -> m' ((Tensor Ref t, Tensor Value Int32)) -refMerge' :: (MonadBuild m', TensorType t) => OpParams -> [Tensor Ref t] -> m' ((Tensor Ref t, Tensor Value Int32)) - --- | Makes its input available to the next iteration. -refNextIteration :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t) -refNextIteration' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t) - --- | Forwards the indexth element of inputs to --- output. -refSelect :: (MonadBuild m', TensorType t) => Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t) -refSelect' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t) - --- | Forwards the ref tensor `data` to the output port determined by --- pred. --- --- If pred is true, the `data` input is forwarded to --- output_true. Otherwise, the data goes to --- output_false. --- --- See also Switch and Merge. -refSwitch :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 Bool -> m' ((Tensor Ref t, Tensor Ref t)) -refSwitch' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 Bool -> m' ((Tensor Ref t, Tensor Ref t)) - --- | Computes rectified linear: `max(features, 0)`. -relu :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -relu' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes rectified linear 6: `min(max(features, 0), 6)`. 
-relu6 :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -relu6' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes rectified linear 6 gradients for a Relu6 operation. -relu6Grad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -relu6Grad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes rectified linear gradients for a Relu operation. -reluGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -reluGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Given a quantized tensor described by (input, input_min, input_max), --- outputs a --- --- range that covers the actual values present in that tensor. This op is --- typically used to produce the requested_output_min and --- requested_output_max for Requantize. -requantizationRange :: (OneOf '[Int16, Int32, Word16, Word8] tinput) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float) -requantizationRange' :: (OneOf '[Int16, Int32, Word16, Word8] tinput) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float) - --- | Convert the quantized input tensor into a lower-precision --- output, using the --- --- output range specified with requested_output_min and --- requested_output_max. --- ---
                                  ---
                                • input_min, input_max are scalar floats that specify the --- range for the float interpretation of the input data. For --- example, if input_min is -1.0f and input_max is 1.0f, and we are --- dealing with quint16 quantized data, then a 0 value in the 16-bit data --- should be interpreted as -1.0f, and a 65535 means 1.0f.
                                • ---
                                -requantize :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -requantize' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) - --- | Reshapes a tensor. --- --- Given tensor, this operation returns a tensor that has the --- same values as tensor with shape shape. --- --- If one component of shape is the special value -1, the size of --- that dimension is computed so that the total size remains constant. In --- particular, a shape of `[-1]` flattens into 1-D. At most one --- component of shape can be -1. --- --- If shape is 1-D or higher, then the operation returns a tensor --- with shape shape filled with the values of tensor. In --- this case, the number of elements implied by shape must be the --- same as the number of elements in tensor. 
--- --- For example: --- --- ```prettyprint # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # --- tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], --- [4, 5, 6], [7, 8, 9]] --- --- # tensor t is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor --- t has shape [2, 2, 2] reshape(t, [2, 4]) ==> [[1, 1, 2, --- 2], [3, 3, 4, 4]] --- --- # tensor t is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4, --- 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor t has shape [3, --- 2, 3] # pass '[-1]' to flatten t reshape(t, [-1]) ==> [1, --- 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] --- --- # -1 can also be used to infer the shape --- --- # -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, --- 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2: --- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, --- 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [ 2, -1, 3]) --- ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, --- 6, 6]]] --- --- # tensor t is [7] # shape `[]` reshapes to a scalar --- reshape(t, []) ==> 7 ``` -reshape :: (TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t -reshape' :: (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t - --- | Resize images to size using area interpolation. --- --- Input images can be of different types but output images are always --- float. -resizeArea :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float -resizeArea' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float - --- | Resize images to size using bicubic interpolation. --- --- Input images can be of different types but output images are always --- float. 
-resizeBicubic :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float -resizeBicubic' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float - --- | Resize images to size using bilinear interpolation. --- --- Input images can be of different types but output images are always --- float. -resizeBilinear :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float -resizeBilinear' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float - --- | Computes the gradient of bilinear interpolation. -resizeBilinearGrad :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t -resizeBilinearGrad' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t - --- | Resize images to size using nearest neighbor --- interpolation. -resizeNearestNeighbor :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t -resizeNearestNeighbor' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t - --- | Computes the gradient of nearest neighbor interpolation. -resizeNearestNeighborGrad :: (OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t -resizeNearestNeighborGrad' :: (OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t - --- | Update '*var' according to the adadelta scheme. 
--- --- accum = rho() * accum + (1 - rho()) * grad.square(); update = --- (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; --- update_accum = rho() * update_accum + (1 - rho()) * update.square(); --- var -= update; -resourceApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (ControlNode) -resourceApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (ControlNode) - --- | Update '*var' according to the adagrad scheme. --- --- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) -resourceApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' (ControlNode) -resourceApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' (ControlNode) - --- | Update '*var' according to the proximal adagrad scheme. 
-resourceApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (ControlNode) -resourceApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (ControlNode) - --- | Update '*var' according to the Adam algorithm. --- --- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- --- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - --- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + --- epsilon) -resourceApplyAdam :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (ControlNode) -resourceApplyAdam' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (ControlNode) - --- | Update '*var' according to the centered RMSProp algorithm. --- --- The centered RMSProp algorithm uses an estimate of the centered second --- moment (i.e., the variance) for normalization, as opposed to regular --- RMSProp, which uses the (uncentered) second moment. This often helps --- with training, but is slightly more expensive in terms of computation --- and memory. 
--- --- Note that in dense implementation of this algorithm, mg, ms, and mom --- will update even if the grad is zero, but in this sparse --- implementation, mg, ms, and mom will not update in iterations during --- which the grad is zero. --- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 --- mean_grad = decay * mean_grad + (1-decay) * gradient --- --- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - --- mean_grad ** 2) --- --- mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + --- (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / --- sqrt(ms - mg * mg + epsilon) var <- var - mom -resourceApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode) -resourceApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode) - --- | Update '*var' according to the Ftrl-proximal scheme. 
--- --- accum_new = accum + grad * grad linear += grad + --- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 --- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - --- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new -resourceApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode) -resourceApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode) - --- | Update '*var' by subtracting alpha * delta from it. -resourceApplyGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' (ControlNode) -resourceApplyGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' (ControlNode) - --- | Update '*var' according to the momentum scheme. Set use_nesterov = --- True if you --- --- want to use Nesterov momentum. 
--- --- accum = accum * momentum + grad var -= lr * accum -resourceApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode) -resourceApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode) - --- | Update '*var' and '*accum' according to FOBOS with Adagrad learning --- rate. --- --- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var --- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} -resourceApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (ControlNode) -resourceApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (ControlNode) - --- | Update '*var' as FOBOS algorithm with fixed learning rate. 
--- --- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * --- max{|prox_v|-alpha*l1,0} -resourceApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode) -resourceApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode) - --- | Update '*var' according to the RMSProp algorithm. --- --- Note that in dense implementation of this algorithm, ms and mom will --- update even if the grad is zero, but in this sparse implementation, ms --- and mom will not update in iterations during which the grad is zero. --- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = --- learning_rate * gradient / sqrt(mean_square + epsilon) --- --- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * --- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom -resourceApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode) -resourceApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode) - --- | Gather slices from the variable pointed to by resource --- according to indices. --- --- indices must be an integer tensor of any dimension (usually --- 0-D or 1-D). 
Produces an output tensor with shape `indices.shape + --- params.shape[1:]` where: --- --- ```python # Scalar indices output[:, ..., :] = params[indices, :, ... --- :] --- --- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] --- --- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, --- ..., j], :, ..., :] ``` -resourceGather :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype) -resourceGather' :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype) - --- | Adds sparse updates to the variable referenced by resource. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] += updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] --- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- += updates[i, ..., j, ...] --- --- Duplicate entries are handled correctly: if multiple indices --- reference the same location, their contributions add. --- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterAdd.png" alt /div -resourceScatterAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' (ControlNode) -resourceScatterAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' (ControlNode) - --- | var: Should be from a Variable(). 
-resourceSparseApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (ControlNode) -resourceSparseApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (ControlNode) - --- | Update relevant entries in '*var' and '*accum' according to the --- adagrad scheme. --- --- That is for rows we have grad for, we update var and accum as follows: --- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) -resourceSparseApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (ControlNode) -resourceSparseApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (ControlNode) - --- | Update entries in '*var' and '*accum' according to the proximal --- adagrad scheme. 
-resourceSparseApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (ControlNode) -resourceSparseApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (ControlNode) - --- | Update '*var' according to the centered RMSProp algorithm. --- --- The centered RMSProp algorithm uses an estimate of the centered second --- moment (i.e., the variance) for normalization, as opposed to regular --- RMSProp, which uses the (uncentered) second moment. This often helps --- with training, but is slightly more expensive in terms of computation --- and memory. --- --- Note that in dense implementation of this algorithm, mg, ms, and mom --- will update even if the grad is zero, but in this sparse --- implementation, mg, ms, and mom will not update in iterations during --- which the grad is zero. 
--- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 --- mean_grad = decay * mean_grad + (1-decay) * gradient Delta = --- learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** --- 2) --- --- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * --- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom -resourceSparseApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (ControlNode) -resourceSparseApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (ControlNode) - --- | Update relevant entries in '*var' according to the Ftrl-proximal --- scheme. 
--- --- That is for rows we have grad for, we update var, accum and linear as --- follows: accum_new = accum + grad * grad linear += grad + --- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 --- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - --- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new -resourceSparseApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode) -resourceSparseApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode) - --- | Update relevant entries in '*var' and '*accum' according to the --- momentum scheme. --- --- Set use_nesterov = True if you want to use Nesterov momentum. 
--- --- That is for rows we have grad for, we update var and accum as follows: --- --- accum = accum * momentum + grad var -= lr * accum -resourceSparseApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (ControlNode) -resourceSparseApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (ControlNode) - --- | Sparse update entries in '*var' and '*accum' according to FOBOS --- algorithm. --- --- That is for rows we have grad for, we update var and accum as follows: --- accum += grad * grad prox_v = var prox_v -= lr * grad * (1 / --- sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} -resourceSparseApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (ControlNode) -resourceSparseApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (ControlNode) - --- | Sparse update '*var' as FOBOS algorithm with fixed learning rate. 
--- --- That is for rows we have grad for, we update var as follows: prox_v = --- var - alpha * grad var = sign(prox_v)/(1+alpha*l2) * --- max{|prox_v|-alpha*l1,0} -resourceSparseApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (ControlNode) -resourceSparseApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (ControlNode) - --- | Update '*var' according to the RMSProp algorithm. --- --- Note that in dense implementation of this algorithm, ms and mom will --- update even if the grad is zero, but in this sparse implementation, ms --- and mom will not update in iterations during which the grad is zero. 
--- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = --- learning_rate * gradient / sqrt(mean_square + epsilon) --- --- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * --- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom -resourceSparseApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (ControlNode) -resourceSparseApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (ControlNode) - --- | Restores a tensor from checkpoint files. --- --- Reads a tensor stored in one or several files. If there are several --- files (for instance because a tensor was saved as slices), --- file_pattern may contain wildcard symbols (* and --- ?) in the filename portion only, not in the directory --- portion. --- --- If a file_pattern matches several files, --- preferred_shard can be used to hint in which file the --- requested tensor is likely to be found. This op will first open the --- file at index preferred_shard in the list of matching files --- and try to restore tensors from that file. Only if some tensors or --- tensor slices are not found in that first file, then the Op opens all --- the files. Setting preferred_shard to match the value passed --- as the shard input of a matching Save Op may speed --- up Restore. This attribute only affects performance, not correctness. --- The default value -1 means files are processed in order. --- --- See also RestoreSlice. 
-restore :: (TensorType dt) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt -restore' :: (TensorType dt) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt - --- | Restores a tensor from checkpoint files. --- --- This is like Restore except that restored tensor can be --- listed as filling only a slice of a larger tensor. --- shape_and_slice specifies the shape of the larger tensor and --- the slice that the restored tensor covers. --- --- The shape_and_slice input has the same format as the elements --- of the shapes_and_slices input of the SaveSlices op. -restoreSlice :: (TensorType dt) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt -restoreSlice' :: (TensorType dt) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt - --- | Restores tensors from a V2 checkpoint. --- --- For backward compatibility with the V1 format, this Op currently --- allows restoring from a V1 checkpoint as well: - This Op first --- attempts to find the V2 index file pointed to by "prefix", and if --- found proceed to read it as a V2 checkpoint; - Otherwise the V1 read --- path is invoked. Relying on this behavior is not recommended, as the --- ability to fall back to read V1 might be deprecated and eventually --- removed. --- --- By default, restores the named tensors in full. If the caller wishes --- to restore specific slices of stored tensors, "shape_and_slices" --- should be non-empty strings and correspondingly well-formed. --- --- Callers must ensure all the named tensors are indeed stored in the --- checkpoint. 
-restoreV2 :: (TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (Build) dtypes -restoreV2' :: (TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (Build) dtypes - --- | Reverses specific dimensions of a tensor. --- --- Given a tensor, and a bool tensor dims --- representing the dimensions of tensor, this operation --- reverses each dimension i of tensor where `dims[i]` is --- True. --- --- tensor can have up to 8 dimensions. The number of dimensions --- of tensor must equal the number of elements in dims. --- In other words: --- --- `rank(tensor) = size(dims)` --- --- For example: --- --- ```prettyprint # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, --- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], # --- [20, 21, 22, 23]]]] # tensor t shape is [1, 2, 3, 4] --- --- # dims is [False, False, False, True] reverse(t, dims) ==> --- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12], --- [19, 18, 17, 16], [23, 22, 21, 20]]]] --- --- # dims is [False, True, False, False] reverse(t, dims) ==> --- [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0, 1, 2, --- 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]] --- --- # dims is [False, False, True, False] reverse(t, dims) ==> --- [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22, 23], [16, --- 17, 18, 19], [12, 13, 14, 15]]]] ``` -reverse :: (OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t -reverse' :: (OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t - --- | Reverses variable length slices. 
--- --- This op first slices input along the dimension --- batch_dim, and for each slice i, reverses the first --- `seq_lengths[i]` elements along the dimension seq_dim. --- --- The elements of seq_lengths must obey `seq_lengths[i] < --- input.dims[seq_dim]`, and seq_lengths must be a vector of --- length `input.dims[batch_dim]`. --- --- The output slice i along dimension batch_dim is then --- given by input slice i, with the first `seq_lengths[i]` --- slices along dimension seq_dim reversed. --- --- For example: --- --- ```prettyprint # Given this: batch_dim = 0 seq_dim = 1 input.dims = --- (4, 8, ...) seq_lengths = [7, 2, 3, 5] --- --- # then slices of input are reversed on seq_dim, but only up to --- seq_lengths: output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] --- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] output[2, 0:3, :, --- ...] = input[2, 3:0:-1, :, ...] output[3, 0:5, :, ...] = input[3, --- 5:0:-1, :, ...] --- --- # while entries past seq_lens are copied through: output[0, 7:, :, --- ...] = input[0, 7:, :, ...] output[1, 2:, :, ...] = input[1, 2:, :, --- ...] output[2, 3:, :, ...] = input[2, 3:, :, ...] output[3, 2:, :, --- ...] = input[3, 2:, :, ...] ``` --- --- In contrast, if: --- --- ```prettyprint # Given this: batch_dim = 2 seq_dim = 0 input.dims = --- (8, ?, 4, ...) seq_lengths = [7, 2, 3, 5] --- --- # then slices of input are reversed on seq_dim, but only up to --- seq_lengths: output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] --- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] output[0:3, :, --- 2, :, ...] = input[3:0:-1, :, 2, :, ...] output[0:5, :, 3, :, ...] = --- input[5:0:-1, :, 3, :, ...] --- --- # while entries past seq_lens are copied through: output[7:, :, 0, :, --- ...] = input[7:, :, 0, :, ...] output[2:, :, 1, :, ...] = input[2:, :, --- 1, :, ...] output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] --- output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] 
``` -reverseSequence :: (TensorType t, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t -reverseSequence' :: (TensorType t, OneOf '[Int32, Int64] tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t - --- | Reverses specific dimensions of a tensor. --- --- NOTE `tf.reverse` has now changed behavior in preparation for 1.0. --- `tf.reverse_v2` is currently an alias that will be deprecated before --- TF 1.0. --- --- Given a tensor, and a int32 tensor axis --- representing the set of dimensions of tensor to reverse. This --- operation reverses each dimension i for which there exists --- j s.t. `axis[j] == i`. --- --- tensor can have up to 8 dimensions. The number of dimensions --- specified in axis may be 0 or more entries. If an index is --- specified more than once, a InvalidArgument error is raised. --- --- For example: --- --- ```prettyprint # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, --- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], # --- [20, 21, 22, 23]]]] # tensor t shape is [1, 2, 3, 4] --- --- # dims is [3] or dims is -1 reverse(t, dims) ==> --- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12], --- [19, 18, 17, 16], [23, 22, 21, 20]]]] --- --- # dims is '[1]' (or dims is '[-3]') reverse(t, dims) --- ==> [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0, --- 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]] --- --- # dims is '[2]' (or dims is '[-2]') reverse(t, dims) --- ==> [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22, --- 23], [16, 17, 18, 19], [12, 13, 14, 15]]]] ``` -reverseV2 :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -reverseV2' :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> 
Tensor v'2 tidx -> Tensor Build t - --- | Returns element-wise integer closest to x. --- --- If the result is midway between two representable values, the even --- representable is chosen. For example: --- --- ``` rint(-1.5) ==> -2.0 rint(0.5000001) ==> 1.0 rint([-1.7, --- -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., --- 2.] ``` -rint :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -rint' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Rounds the values of a tensor to the nearest integer, element-wise. --- --- Rounds half to even. Also known as bankers rounding. If you want to --- round according to the current system rounding mode use std::cint. -round :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -round' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes reciprocal of square root of x element-wise. --- --- I.e., \(y = 1 / sqrt{x}\). -rsqrt :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -rsqrt' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the gradient for the rsqrt of x wrt its input. --- --- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and --- dy is the corresponding input gradient. -rsqrtGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -rsqrtGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Generate a single randomly distorted bounding box for an image. --- --- Bounding box annotations are often supplied in addition to --- ground-truth labels in image recognition or object localization tasks. 
--- A common technique for training such a system is to randomly distort --- an image while preserving its content, i.e. *data augmentation*. This --- Op outputs a randomly distorted localization of an object, i.e. --- bounding box, given an image_size, bounding_boxes --- and a series of constraints. --- --- The output of this Op is a single bounding box that may be used to --- crop the original image. The output is returned as 3 tensors: --- begin, size and bboxes. The first 2 tensors --- can be fed directly into `tf.slice` to crop the image. The latter may --- be supplied to `tf.image.draw_bounding_boxes` to visualize what the --- bounding box looks like. --- --- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, --- x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` --- relative to the width and height of the underlying image. --- --- For example, --- --- ```python # Generate a single distorted bounding box. begin, size, --- bbox_for_draw = tf.image.sample_distorted_bounding_box( --- tf.shape(image), bounding_boxes=bounding_boxes) --- --- # Draw the bounding box in an image summary. image_with_box = --- tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) --- tf.image_summary(images_with_box, image_with_box) --- --- # Employ the bounding box to distort the image. distorted_image = --- tf.slice(image, begin, size) ``` --- --- Note that if no bounding box information is available, setting --- `use_image_if_no_bounding_boxes = true` will assume there is a single --- implicit bounding box covering the whole image. If --- use_image_if_no_bounding_boxes is false and no bounding boxes --- are supplied, an error is raised. 
-sampleDistortedBoundingBox :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) -sampleDistortedBoundingBox' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) - --- | Saves the input tensors to disk. --- --- The size of tensor_names must match the number of tensors in --- `data`. `data[i]` is written to filename with name --- `tensor_names[i]`. --- --- See also SaveSlices. -save :: (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList (v'3) t -> m' (ControlNode) -save' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList (v'3) t -> m' (ControlNode) - --- | Saves input tensors slices to disk. --- --- This is like Save except that tensors can be listed in the --- saved file as being a slice of a larger tensor. --- shapes_and_slices specifies the shape of the larger tensor --- and the slice that this tensor covers. shapes_and_slices must --- have as many elements as tensor_names. --- --- Elements of the shapes_and_slices input must either be: --- ---
                                  ---
                                • The empty string, in which case the corresponding tensor is saved --- normally.
                                • ---
                                • A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the --- dimI are the dimensions of the larger tensor and `slice-spec` --- specifies what part is covered by the tensor to save.
                                • ---
                                --- --- `slice-spec` itself is a :-separated list: --- `slice0:slice1:...:sliceN-1` where each sliceI is either: --- ---
                                  ---
                                • The string - meaning that the slice covers all indices of --- this dimension
                                • ---
                                • `start,length` where start and length are --- integers. In that case the slice covers length indices starting --- at start.
                                • ---
                                --- --- See also Save. -saveSlices :: (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) t -> m' (ControlNode) -saveSlices' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) t -> m' (ControlNode) - --- | Saves tensors in V2 checkpoint format. --- --- By default, saves the named tensors in full. If the caller wishes to --- save specific slices of full tensors, "shape_and_slices" should be --- non-empty strings and correspondingly well-formed. -saveV2 :: (MonadBuild m', TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) dtypes -> m' (ControlNode) -saveV2' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) dtypes -> m' (ControlNode) - --- | Outputs a Summary protocol buffer with scalar values. --- --- The input tags and values must have the same shape. --- The generated summary has a summary value for each tag-value pair in --- tags and values. -scalarSummary :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString -scalarSummary' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString - --- | Adds sparse updates to a variable reference. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] += updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] --- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- += updates[i, ..., j, ...] --- --- This operation outputs ref after the update is done. 
This --- makes it easier to chain operations that need to use the reset value. --- --- Duplicate entries are handled correctly: if multiple indices --- reference the same location, their contributions add. --- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterAdd.png" alt /div -scatterAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) -scatterAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Divides a variable reference by sparse updates. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] /= updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] /= updates[i, ...] --- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- /= updates[i, ..., j, ...] --- --- This operation outputs ref after the update is done. This --- makes it easier to chain operations that need to use the reset value. --- --- Duplicate entries are handled correctly: if multiple indices --- reference the same location, their contributions divide. --- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. 
-scatterDiv :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) -scatterDiv' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Multiplies sparse updates into a variable reference. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] *= updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] *= updates[i, ...] --- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- *= updates[i, ..., j, ...] --- --- This operation outputs ref after the update is done. This --- makes it easier to chain operations that need to use the reset value. --- --- Duplicate entries are handled correctly: if multiple indices --- reference the same location, their contributions multiply. --- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. -scatterMul :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) -scatterMul' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Creates a new tensor by applying sparse updates to individual --- --- values or slices within a zero tensor of the given shape tensor --- according to indices. This operator is the inverse of the --- tf.gather_nd operator which extracts values or slices from a --- given tensor. 
--- --- TODO(simister): Add a link to Variable.getitem documentation on --- slice syntax. --- --- shape is a TensorShape with rank P and --- indices is a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- shape. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < --- K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of shape. --- --- updates is Tensor of rank `Q-1+P-K` with shape: --- --- ``` [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]]. ``` --- --- The simplest form of scatter is to insert individual elements in a --- tensor by index. For example, say we want to insert 4 scattered --- elements in a rank-1 tensor with 8 elements. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterNd1.png" alt /div --- --- In Python, this scatter operation would look like this: --- --- indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, --- 10, 11, 12]) shape = tf.constant([8]) scatter = tf.scatter_nd(indices, --- updates, shape) with tf.Session() as sess: print sess.run(scatter) --- --- The resulting tensor would look like this: --- ---
                                  ---
                                • 0, 11, 0, 10, 9, 0, 0, 12
                                • ---
                                --- --- We can also, insert entire slices of a higher rank tensor all at once. --- For example, if we wanted to insert two slices in the first dimension --- of a rank-3 tensor with two matrices of new values. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterNd2.png" alt /div --- --- In Python, this scatter operation would look like this: --- --- indices = tf.constant([[0], [2]]) updates = tf.constant([[[5, 5, 5, --- 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[5, 5, 5, 5], [6, 6, --- 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]) shape = tf.constant([4, 4, 4]) --- scatter = tf.scatter_nd(indices, updates, shape) with tf.Session() as --- sess: print sess.run(scatter) --- --- The resulting tensor would look like this: --- ---
                                  ---
                                • [[5, 5, 5, 5 , [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, --- 8]],
                                • ---
                                • [0, 0, 0, 0 , [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, --- 0]],
                                • ---
                                • [5, 5, 5, 5 , [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, --- 8]],
                                • ---
                                • [0, 0, 0, 0 , [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, --- 0]]]
                                • ---
                                -scatterNd :: (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t -scatterNd' :: (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t - --- | Applies sparse addition between updates and individual values --- or slices --- --- within a given variable according to indices. --- --- ref is a Tensor with rank P and --- indices is a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < --- K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of ref. --- --- updates is Tensor of rank `Q-1+P-K` with shape: --- --- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` --- --- For example, say we want to add 4 scattered elements to a rank-1 --- tensor to 8 elements. In Python, that addition would look like this: --- --- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = --- tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, --- 12]) add = tf.scatter_nd_add(ref, indices, updates) with tf.Session() --- as sess: print sess.run(add) --- --- The resulting update to ref would look like this: --- ---
                                  ---
                                • 1, 13, 3, 14, 14, 6, 7, 20
                                • ---
                                --- --- See tf.scatter_nd for more details about how to make updates to --- slices. -scatterNdAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) -scatterNdAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Applies sparse subtraction between updates and individual --- values or slices --- --- within a given variable according to indices. --- --- ref is a Tensor with rank P and --- indices is a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < --- K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of ref. --- --- updates is Tensor of rank `Q-1+P-K` with shape: --- --- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` --- --- For example, say we want to subtract 4 scattered elements from a --- rank-1 tensor with 8 elements. In Python, that subtraction would look --- like this: --- --- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = --- tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, --- 12]) sub = tf.scatter_nd_sub(ref, indices, updates) with tf.Session() --- as sess: print sess.run(sub) --- --- The resulting update to ref would look like this: --- ---
                                  ---
                                • 1, -9, 3, -6, -4, 6, 7, -4
                                • ---
                                --- --- See tf.scatter_nd for more details about how to make updates to --- slices. -scatterNdSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) -scatterNdSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Applies sparse updates to individual values or slices within --- a given --- --- variable according to indices. --- --- ref is a Tensor with rank P and --- indices is a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < --- K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of ref. --- --- updates is Tensor of rank `Q-1+P-K` with shape: --- --- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` --- --- For example, say we want to update 4 scattered elements to a rank-1 --- tensor to 8 elements. In Python, that update would look like this: --- --- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = --- tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, --- 12]) update = tf.scatter_nd_update(ref, indices, updates) with --- tf.Session() as sess: print sess.run(update) --- --- The resulting update to ref would look like this: --- ---
                                  ---
                                • 1, 11, 3, 10, 9, 6, 7, 12
                                • ---
                                --- --- See tf.scatter_nd for more details about how to make updates to --- slices. -scatterNdUpdate :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) -scatterNdUpdate' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Subtracts sparse updates to a variable reference. --- --- # Scalar indices ref[indices, ...] -= updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] -= updates[i, ...] --- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- -= updates[i, ..., j, ...] --- --- This operation outputs ref after the update is done. This --- makes it easier to chain operations that need to use the reset value. --- --- Duplicate entries are handled correctly: if multiple indices --- reference the same location, their (negated) contributions add. --- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterSub.png" alt /div -scatterSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) -scatterSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Applies sparse updates to a variable reference. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] = updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] = updates[i, ...] 
--- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- = updates[i, ..., j, ...] --- --- This operation outputs ref after the update is done. This --- makes it easier to chain operations that need to use the reset value. --- --- If values in ref is to be updated more than once, because --- there are duplicate entries in indices, the order at which --- the updates happen for each value is undefined. --- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterUpdate.png" alt /div -scatterUpdate :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) -scatterUpdate' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) - --- | Computes fingerprints of the input strings. -sdcaFprint :: Tensor v'1 ByteString -> Tensor Build Int64 -sdcaFprint' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Int64 - --- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA) --- optimizer for --- --- linear models with L1 + L2 regularization. As global optimization --- objective is strongly-convex, the optimizer optimizes the dual --- objective at each step. The optimizer applies each update one example --- at a time. Examples are sampled uniformly, and the optimizer is --- learning rate free and enjoys linear convergence rate. --- --- Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; --- Zhang, Tong. 2012 arXiv1211.2717S: --- http://arxiv.org/pdf/1211.2717v1.pdf --- --- Loss objective = sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w| --- --- Adding vs. Averaging in Distributed Primal-Dual Optimization. Chenxin --- Ma, Virginia Smith, Martin Jaggi, Michael I. 
Jordan, Peter Richtarik, --- Martin Takac http://arxiv.org/abs/1502.03508 --- --- Stochastic Dual Coordinate Ascent with Adaptive Probabilities Dominik --- Csiba, Zheng Qu, Peter Richtarik --- https://arxiv.org/abs/1502.08053 -sdcaOptimizer :: Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) -sdcaOptimizer' :: OpParams -> Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) - --- | Applies L1 regularization shrink step on the parameters. -sdcaShrinkL1 :: (MonadBuild m') => Float -> Float -> [Tensor Ref Float] -> m' (ControlNode) -sdcaShrinkL1' :: (MonadBuild m') => OpParams -> Float -> Float -> [Tensor Ref Float] -> m' (ControlNode) - --- | Computes the maximum along segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that \(output_i = max_j(data_j)\) where --- max is over j such that `segment_ids[j] == i`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/SegmentMax.png" alt /div -segmentMax :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t -segmentMax' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t - --- | Computes the mean along segments of a tensor. 
--- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that \(output_i = frac{sum_j data_j}{N}\) where --- mean is over j such that `segment_ids[j] == i` and --- N is the total number of values summed. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/SegmentMean.png" alt /div -segmentMean :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t -segmentMean' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t - --- | Computes the minimum along segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that \(output_i = min_j(data_j)\) where --- min is over j such that `segment_ids[j] == i`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/SegmentMin.png" alt /div -segmentMin :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t -segmentMin' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t - --- | Computes the product along segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that \(output_i = prod_j data_j\) where the --- product is over j such that `segment_ids[j] == i`. 
--- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/SegmentProd.png" alt /div -segmentProd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t -segmentProd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t - --- | Computes the sum along segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that \(output_i = sum_j data_j\) where sum is --- over j such that `segment_ids[j] == i`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/SegmentSum.png" alt /div -segmentSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t -segmentSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t - --- | Selects elements from t or e, depending on --- condition. --- --- The t, and e tensors must all have the same shape, --- and the output will also have that shape. --- --- The condition tensor must be a scalar if t and --- e are scalars. If t and e are vectors or --- higher rank, then condition must be either a scalar, a vector --- with size matching the first dimension of t, or must have the --- same shape as t. 
--- --- The condition tensor acts as a mask that chooses, based on --- the value at each element, whether the corresponding element / row in --- the output should be taken from t (if true) or e (if --- false). --- --- If condition is a vector and t and e are --- higher rank matrices, then it chooses which row (outer dimension) to --- copy from t and e. If condition has the --- same shape as t and e, then it chooses which element --- to copy from t and e. --- --- For example: --- --- ```prettyprint # condition tensor is [[True, False] # [False, --- True]] # t is [[1, 2], # [3, 4]] # e is [[5, 6], # --- [7, 8]] select(condition, t, e) ==> [[1, 6], [7, 4]] --- --- # condition tensor is [True, False] # t is [[1, 2], --- # [3, 4]] # e is [[5, 6], # [7, 8]] select(condition, t, e) --- ==> [[1, 2], [7, 8]] --- --- ``` -select :: (TensorType t) => Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -select' :: (TensorType t) => OpParams -> Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t - --- | Computes the Eigen Decomposition of a batch of square self-adjoint --- matrices. --- --- The input is a tensor of shape `[..., M, M]` whose inner-most 2 --- dimensions form square matrices, with the same constraints as the --- single matrix SelfAdjointEig. --- --- The result is a [..., M+1, M] matrix with [..., 0,:] containing the --- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. -selfAdjointEig :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t -selfAdjointEig' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the eigen decomposition of one or more square self-adjoint --- matrices. --- --- Computes the eigenvalues and (optionally) eigenvectors of each inner --- matrix in input such that `input[..., :, :] = v[..., :, :] * --- diag(e[..., :])`. --- --- ```prettyprint # a is a tensor. # e is a tensor of eigenvalues. # v is --- a tensor of eigenvectors. 
e, v = self_adjoint_eig(a) e = --- self_adjoint_eig(a, compute_v=False) ``` -selfAdjointEigV2 :: (OneOf '[Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t) -selfAdjointEigV2' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t) - --- | Serialize an N-minibatch SparseTensor into an `[N, --- 3]` string Tensor. --- --- The SparseTensor must have rank R greater than 1, --- and the first dimension is treated as the minibatch dimension. --- Elements of the SparseTensor must be sorted in increasing --- order of this first dimension. The serialized SparseTensor --- objects going into each row of serialized_sparse will have --- rank `R-1`. --- --- The minibatch size N is extracted from `sparse_shape[0]`. -serializeManySparse :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString -serializeManySparse' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString - --- | Serialize a SparseTensor into a string 3-vector (1-D --- Tensor) object. -serializeSparse :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString -serializeSparse' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString - --- | Number of unique elements along last dimension of input set. --- --- Input set is a SparseTensor represented by --- set_indices, set_values, and set_shape. The --- last dimension contains values in a set, duplicates are allowed but --- ignored. --- --- If validate_indices is True, this op validates the --- order and range of set indices. 
-setSize :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32 -setSize' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32 - --- | Returns the shape of a tensor. --- --- This operation returns a 1-D integer tensor representing the shape of --- input. --- --- For example: --- --- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], --- [4, 4, 4]]] shape(t) ==> [2, 2, 3] ``` -shape :: (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type -shape' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type - --- | Returns shape of tensors. --- --- This operation returns N 1-D integer tensors representing shape of --- `input[i]s`. -shapeN :: (TensorType t, OneOf '[Int32, Int64] out_type) => [Tensor v'1 t] -> [Tensor Build out_type] -shapeN' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> [Tensor v'1 t] -> [Tensor Build out_type] - --- | Generate a sharded filename. The filename is printf formatted as --- --- %s-%05d-of-%05d, basename, shard, num_shards. -shardedFilename :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString -shardedFilename' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString - --- | Generate a glob pattern matching all sharded file names. -shardedFilespec :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString -shardedFilespec' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString - --- | Computes sigmoid of x element-wise. --- --- Specifically, `y = 1 / (1 + exp(-x))`. 
-sigmoid :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -sigmoid' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the gradient of the sigmoid of x wrt its input. --- --- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and --- dy is the corresponding input gradient. -sigmoidGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -sigmoidGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Returns an element-wise indication of the sign of a number. --- --- `y = sign(x) = -1` if `x 0 if `x == 0`; 1 if `x 0`. --- --- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y --- = 0`. -sign :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -sign' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes sin of x element-wise. -sin :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -sin' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns the size of a tensor. --- --- This operation returns an integer representing the number of elements --- in input. --- --- For example: --- --- ```prettyprint # t is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], --- [4, 4, 4]]]] size(t) ==> 12 ``` -size :: (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type -size' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type - --- | Parses a text file and creates a batch of examples. 
-skipgram :: (MonadBuild m') => Int64 -> m' ((Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)) -skipgram' :: (MonadBuild m') => OpParams -> Int64 -> m' ((Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)) - --- | Return a slice from input. --- --- The output tensor is a tensor with dimensions described by size --- whose values are extracted from input starting at the offsets --- in begin. --- ---
                                  ---
                                • Requirements*: 0 <= begin[i] <= begin[i] + size[i] <= Di --- for i in [0, n)
                                • ---
                                -slice :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t -slice' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t - --- | Computes softmax activations. --- --- For each batch i and class j we have --- --- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) -softmax :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -softmax' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes softmax cross entropy cost and gradients to backpropagate. --- --- Inputs are the logits, not probabilities. -softmaxCrossEntropyWithLogits :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) -softmaxCrossEntropyWithLogits' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) - --- | Computes softplus: `log(exp(features) + 1)`. -softplus :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -softplus' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes softplus gradients for a softplus operation. -softplusGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -softplusGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes softsign: `features / (abs(features) + 1)`. 
-softsign :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t -softsign' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes softsign gradients for a softsign operation. -softsignGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -softsignGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | SpaceToBatch for 4-D tensors of type T. --- --- This is a legacy version of the more general SpaceToBatchND. --- --- Zero-pads and then rearranges (permutes) blocks of spatial data into --- batch. More specifically, this op outputs a copy of the input tensor --- where values from the height and width dimensions --- are moved to the batch dimension. After the zero-padding, --- both height and width of the input must be divisible --- by the block size. -spaceToBatch :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t -spaceToBatch' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t - --- | SpaceToBatch for N-D tensors of type T. --- --- This operation divides "spatial" dimensions `[1, ..., M]` of the input --- into a grid of blocks of shape block_shape, and interleaves --- these blocks with the "batch" dimension (0) such that in the output, --- the spatial dimensions `[1, ..., M]` correspond to the position within --- the grid, and the batch dimension combines both the position within a --- spatial block and the original batch position. Prior to division into --- blocks, the spatial dimensions of the input are optionally zero padded --- according to paddings. See below for a precise description. 
-spaceToBatchND :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t -spaceToBatchND' :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t - --- | SpaceToDepth for tensors of type T. --- --- Rearranges blocks of spatial data, into depth. More specifically, this --- op outputs a copy of the input tensor where values from the --- height and width dimensions are moved to the --- depth dimension. The attr block_size indicates the --- input block size and how the data is moved. --- ---
                                  ---
                                • Non-overlapping blocks of size `block_size x block size` are --- rearranged into depth at each location.
                                • ---
                                • The depth of the output tensor is `input_depth * block_size * --- block_size`.
                                • ---
                                • The input tensor's height and width must be divisible by --- block_size.
                                • ---
                                --- --- That is, assuming the input is in the shape: `[batch, height, width, --- depth]`, the shape of the output will be: `[batch, --- heightblock_size, widthblock_size, --- depth*block_size*block_size]` --- --- This operation requires that the input tensor be of rank 4, and that --- block_size be >=1 and a divisor of both the input --- height and width. --- --- This operation is useful for resizing the activations between --- convolutions (but keeping all data), e.g. instead of pooling. It is --- also useful for training purely convolutional models. --- --- For example, given this input of shape `[1, 2, 2, 1]`, and block_size --- of 2: --- --- ```prettyprint x = [[[[1], [2]], [[3], [4]]]] ``` --- --- This operation will output a tensor of shape `[1, 1, 1, 4]`: --- --- ```prettyprint [[[[1, 2, 3, 4]]]] ``` --- --- Here, the input has a batch of 1 and each batch element has shape `[2, --- 2, 1]`, the corresponding output will have a single element (i.e. --- width and height are both 1) and will have a depth of 4 channels (1 * --- block_size * block_size). The output element shape is `[1, 1, 4]`. --- --- For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, --- e.g. 
--- --- ```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, --- 12]]]] ``` --- --- This operation, for block_size of 2, will return the following tensor --- of shape `[1, 1, 1, 12]` --- --- ```prettyprint [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ``` --- --- Similarly, for the following input of shape `[1 4 4 1]`, and a block --- size of 2: --- --- ```prettyprint x = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]], [[9], --- [10], [13], [14]], [[11], [12], [15], [16]]]] ``` --- --- the operator will return the following tensor of shape `[1 2 2 4]`: --- --- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], --- [13, 14, 15, 16]]]] ``` -spaceToDepth :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor Build t -spaceToDepth' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t - --- | Applies a sparse gradient to a given accumulator. Does not add if --- local_step is --- --- lesser than the accumulator's global_step. -sparseAccumulatorApplyGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' (ControlNode) -sparseAccumulatorApplyGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' (ControlNode) - --- | Extracts the average sparse gradient in the given --- SparseConditionalAccumulator, --- --- provided that sufficient (i.e., more than num_required) gradients have --- been accumulated. The op will blocks until sufficient gradients have --- been accumulated. If the accumulator has already aggregated more than --- num_required gradients, it will return its average of the accumulated --- gradients. 
Also automatically increments the recorded global_step in --- the accumulator by 1, and resets the aggregate to 0. -sparseAccumulatorTakeGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) -sparseAccumulatorTakeGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) - --- | Adds two SparseTensor objects to produce another --- SparseTensor. --- --- The input SparseTensor objects' indices are assumed ordered --- in standard lexicographic order. If this is not the case, before this --- step run SparseReorder to restore index ordering. --- --- By default, if two values sum to zero at some index, the output --- SparseTensor would still include that particular location in --- its index, storing a zero in the corresponding value slot. To override --- this, callers can specify thresh, indicating that if the sum --- has a magnitude strictly smaller than thresh, its --- corresponding value and index would then not be included. In --- particular, `thresh == 0` (default) means everything is kept and --- actual thresholding happens only for a positive value. --- --- In the following shapes, nnz is the count after taking --- thresh into account. 
-sparseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -sparseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) - --- | The gradient operator for the SparseAdd op. --- --- The SparseAdd op calculates A + B, where A, B, and the sum are all --- represented as SparseTensor objects. This op takes in the --- upstream gradient w.r.t. non-empty values of the sum, and outputs the --- gradients w.r.t. the non-empty values of A and B. -sparseAddGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t) -sparseAddGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t) - --- | var: Should be from a Variable(). 
-sparseApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t) -sparseApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t) - --- | Update relevant entries in '*var' and '*accum' according to the --- adagrad scheme. --- --- That is for rows we have grad for, we update var and accum as follows: --- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) -sparseApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t) -sparseApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t) - --- | Update entries in '*var' and '*accum' according to the proximal --- adagrad scheme. 
-sparseApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t) -sparseApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t) - --- | Update '*var' according to the centered RMSProp algorithm. --- --- The centered RMSProp algorithm uses an estimate of the centered second --- moment (i.e., the variance) for normalization, as opposed to regular --- RMSProp, which uses the (uncentered) second moment. This often helps --- with training, but is slightly more expensive in terms of computation --- and memory. --- --- Note that in dense implementation of this algorithm, mg, ms, and mom --- will update even if the grad is zero, but in this sparse --- implementation, mg, ms, and mom will not update in iterations during --- which the grad is zero. 
--- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 --- mean_grad = decay * mean_grad + (1-decay) * gradient Delta = --- learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** --- 2) --- --- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * --- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom -sparseApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t) -sparseApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t) - --- | Update relevant entries in '*var' according to the Ftrl-proximal --- scheme. 
--- --- That is for rows we have grad for, we update var, accum and linear as --- follows: accum_new = accum + grad * grad linear += grad + --- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 --- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - --- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new -sparseApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) -sparseApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) - --- | Update relevant entries in '*var' and '*accum' according to the --- momentum scheme. --- --- Set use_nesterov = True if you want to use Nesterov momentum. 
--- --- That is for rows we have grad for, we update var and accum as follows: --- --- accum = accum * momentum + grad var -= lr * accum -sparseApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t) -sparseApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t) - --- | Sparse update entries in '*var' and '*accum' according to FOBOS --- algorithm. --- --- That is for rows we have grad for, we update var and accum as follows: --- accum += grad * grad prox_v = var prox_v -= lr * grad * (1 / --- sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} -sparseApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t) -sparseApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t) - --- | Sparse update '*var' as FOBOS algorithm with fixed learning rate. 
--- --- That is for rows we have grad for, we update var as follows: prox_v = --- var - alpha * grad var = sign(prox_v)/(1+alpha*l2) * --- max{|prox_v|-alpha*l1,0} -sparseApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t) -sparseApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t) - --- | Update '*var' according to the RMSProp algorithm. --- --- Note that in dense implementation of this algorithm, ms and mom will --- update even if the grad is zero, but in this sparse implementation, ms --- and mom will not update in iterations during which the grad is zero. 
--- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = --- learning_rate * gradient / sqrt(mean_square + epsilon) --- --- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * --- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom -sparseApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t) -sparseApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t) - --- | Concatenates a list of SparseTensor along the specified --- dimension. --- --- Concatenation is with respect to the dense versions of these sparse --- tensors. It is assumed that each input is a SparseTensor --- whose elements are ordered along increasing dimension number. --- --- All inputs' shapes must match, except for the concat dimension. The --- indices, values, and shapes lists must have --- the same length. --- --- The output shape is identical to the inputs', except along the concat --- dimension, where it is the sum of the inputs' sizes along that --- dimension. --- --- The output elements will be resorted to preserve the sort order along --- increasing dimension number. --- --- This op runs in `O(M log M)` time, where M is the total --- number of non-empty values across all inputs. This is due to the need --- for an internal sort in order to concatenate efficiently across an --- arbitrary dimension. 
--- --- For example, if `concat_dim = 1` and the inputs are --- --- sp_inputs[0]: shape = [2, 3] [0, 2]: "a" [1, 0]: "b" [1, 1]: "c" --- --- sp_inputs[1]: shape = [2, 4] [0, 1]: "d" [0, 2]: "e" --- --- then the output will be --- --- shape = [2, 7] [0, 2]: "a" [0, 4]: "d" [0, 5]: "e" [1, 0]: "b" [1, 1]: --- "c" --- --- Graphically this is equivalent to doing --- ---
                                  ---
                                • a concat [ d e ] = [ a d e ]
                                • ---
                                • b c [ ] [b c ]
                                • ---
                                -sparseConcat :: (TensorType t) => Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -sparseConcat' :: (TensorType t) => OpParams -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) - --- | A conditional accumulator for aggregating sparse gradients. The --- accumulator --- --- accepts gradients marked with local_step greater or equal to the most --- recent global_step known to the accumulator. The average can be --- extracted from the accumulator, provided sufficient gradients have --- been accumulated. Extracting the average automatically resets the --- aggregate to 0, and increments the global_step recorded by the --- accumulator. -sparseConditionalAccumulator :: (MonadBuild m') => DataType -> Shape -> m' (Tensor Ref ByteString) -sparseConditionalAccumulator' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString) - --- | Adds up a SparseTensor and a dense Tensor, using these special rules: --- ---
--- 1. Broadcasts the dense side to have the same shape as the sparse
---    side, if eligible;
--- 2. Then, only the dense values pointed to by the indices of the
---    SparseTensor participate in the cwise addition.
                                --- --- By these rules, the result is a logical SparseTensor with exactly the --- same indices and shape, but possibly with different non-zero values. --- The output of this Op is the resultant non-zero values. -sparseDenseCwiseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t -sparseDenseCwiseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t - --- | Component-wise divides a SparseTensor by a dense Tensor. --- ---
--- *Limitation*: this Op only broadcasts the dense side to the sparse
--- side, but not the other direction.
                                -sparseDenseCwiseDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t -sparseDenseCwiseDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t - --- | Component-wise multiplies a SparseTensor by a dense Tensor. --- --- The output locations corresponding to the implicitly zero elements in --- the sparse tensor will be zero (i.e., will not take up storage space), --- regardless of the contents of the dense tensor (even if it's +/-INF --- and that INF*0 == NaN). --- ---
--- *Limitation*: this Op only broadcasts the dense side to the sparse
--- side, but not the other direction.
                                -sparseDenseCwiseMul :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t -sparseDenseCwiseMul' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t - --- | Multiply matrix "a" by matrix "b". --- --- The inputs must be two-dimensional matrices and the inner dimension of --- "a" must match the outer dimension of "b". This op is optimized for --- the case where at least one of "a" or "b" is sparse. The breakeven for --- using this versus a dense matrix multiply on one platform was 30% zero --- values in the sparse matrix. -sparseMatMul :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) => Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float -sparseMatMul' :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) => OpParams -> Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float - --- | Computes the sum of elements across dimensions of a SparseTensor. --- --- This Op takes a SparseTensor and is the sparse counterpart to --- `tf.reduce_sum()`. In particular, this Op also returns a dense --- Tensor instead of a sparse one. --- --- Reduces sp_input along the dimensions given in --- reduction_axes. Unless keep_dims is true, the rank --- of the tensor is reduced by 1 for each entry in --- reduction_axes. If keep_dims is true, the reduced --- dimensions are retained with length 1. --- --- If reduction_axes has no entries, all dimensions are reduced, --- and a tensor with a single element is returned. Additionally, the axes --- can be negative, which are interpreted according to the indexing rules --- in Python. 
-sparseReduceSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t -sparseReduceSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t - --- | Computes the sum of elements across dimensions of a SparseTensor. --- --- This Op takes a SparseTensor and is the sparse counterpart to --- `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a --- SparseTensor. --- --- Reduces sp_input along the dimensions given in --- reduction_axes. Unless keep_dims is true, the rank --- of the tensor is reduced by 1 for each entry in --- reduction_axes. If keep_dims is true, the reduced --- dimensions are retained with length 1. --- --- If reduction_axes has no entries, all dimensions are reduced, --- and a tensor with a single element is returned. Additionally, the axes --- can be negative, which are interpreted according to the indexing rules --- in Python. -sparseReduceSumSparse :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -sparseReduceSumSparse' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) - --- | Reorders a SparseTensor into the canonical, row-major ordering. --- --- Note that by convention, all sparse ops preserve the canonical --- ordering along increasing dimension number. 
The only time ordering can --- be violated is during manual manipulation of the indices and values --- vectors to add entries. --- --- Reordering does not affect the shape of the SparseTensor. --- --- If the tensor has rank R and N non-empty values, --- input_indices has shape `[N, R]`, input_values has length --- N, and input_shape has length R. -sparseReorder :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t) -sparseReorder' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t) - --- | Reshapes a SparseTensor to represent values in a new dense shape. --- --- This operation has the same semantics as reshape on the represented --- dense tensor. The input_indices are recomputed based on the --- requested new_shape. --- --- If one component of new_shape is the special value -1, the --- size of that dimension is computed so that the total dense size --- remains constant. At most one component of new_shape can be --- -1. The number of dense elements implied by new_shape must be --- the same as the number of dense elements originally implied by --- input_shape. --- --- Reshaping does not affect the order of values in the SparseTensor. --- --- If the input tensor has rank R_in and N non-empty --- values, and new_shape has length R_out, then --- input_indices has shape `[N, R_in]`, input_shape has --- length R_in, output_indices has shape `[N, R_out]`, --- and output_shape has length R_out. -sparseReshape :: Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64) -sparseReshape' :: OpParams -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64) - --- | Computes the mean along sparse segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. 
--- --- Like SegmentMean, but segment_ids can have rank less --- than `data`'s first dimension, selecting a subset of dimension 0, --- specified by indices. -sparseSegmentMean :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t -sparseSegmentMean' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t - --- | Computes gradients for SparseSegmentMean. --- --- Returns tensor "output" with same shape as grad, except for dimension --- 0 whose value is output_dim0. -sparseSegmentMeanGrad :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t -sparseSegmentMeanGrad' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t - --- | Computes the sum along sparse segments of a tensor divided by the sqrt --- of N. --- --- N is the size of the segment being reduced. --- --- Read the section on Segmentation for an explanation of --- segments. -sparseSegmentSqrtN :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t -sparseSegmentSqrtN' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t - --- | Computes gradients for SparseSegmentSqrtN. --- --- Returns tensor "output" with same shape as grad, except for dimension --- 0 whose value is output_dim0. 
-sparseSegmentSqrtNGrad :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t -sparseSegmentSqrtNGrad' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t - --- | Computes the sum along sparse segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Like SegmentSum, but segment_ids can have rank less --- than `data`'s first dimension, selecting a subset of dimension 0, --- specified by indices. --- --- For example: --- --- ```prettyprint c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) --- --- # Select two rows, one segment. tf.sparse_segment_sum(c, --- tf.constant([0, 1]), tf.constant([0, 0])) ==> [[0 0 0 0]] --- --- # Select two rows, two segment. tf.sparse_segment_sum(c, --- tf.constant([0, 1]), tf.constant([0, 1])) ==> [[ 1 2 3 4] [-1 -2 -3 --- -4]] --- --- # Select all rows, two segments. tf.sparse_segment_sum(c, --- tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) ==> [[0 0 0 0] [5 6 --- 7 8]] --- --- # Which is equivalent to: tf.segment_sum(c, tf.constant([0, 0, 1])) --- ``` -sparseSegmentSum :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t -sparseSegmentSum' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t - --- | Applies softmax to a batched N-D SparseTensor. --- --- The inputs represent an N-D SparseTensor with logical shape `[..., B, --- C]` (where `N >= 2`), and with indices sorted in the canonical --- lexicographic order. 
--- --- This op is equivalent to applying the normal `tf.nn.softmax()` to each --- innermost logical submatrix with shape `[B, C]`, but with the catch --- that *the implicitly zero elements do not participate*. Specifically, --- the algorithm is equivalent to the following: --- ---
--- 1. Applies `tf.nn.softmax()` to a densified view of each innermost
---    submatrix with shape `[B, C]`, along the size-C dimension;
--- 2. Masks out the original implicitly-zero locations;
--- 3. Renormalizes the remaining elements.
                                --- --- Hence, the SparseTensor result has exactly the same non-zero --- indices and shape. -sparseSoftmax :: (OneOf '[Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t -sparseSoftmax' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t - --- | Computes softmax cross entropy cost and gradients to backpropagate. --- --- Unlike SoftmaxCrossEntropyWithLogits, this operation does not --- accept a matrix of label probabilities, but rather a single label per --- row of features. This label is considered to have probability 1.0 for --- the given row. --- --- Inputs are the logits, not probabilities. -sparseSoftmaxCrossEntropyWithLogits :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) => Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t) -sparseSoftmaxCrossEntropyWithLogits' :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) => OpParams -> Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t) - --- | Returns the element-wise max of two SparseTensors. --- --- Assumes the two SparseTensors have the same shape, i.e., no --- broadcasting. -sparseSparseMaximum :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) -sparseSparseMaximum' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) - --- | Returns the element-wise min of two SparseTensors. --- --- Assumes the two SparseTensors have the same shape, i.e., no --- broadcasting. 
-sparseSparseMinimum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) -sparseSparseMinimum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) - --- | Split a SparseTensor into num_split tensors along --- one dimension. --- --- If the `shape[split_dim]` is not an integer multiple of --- num_split. Slices `[0 : shape[split_dim] % num_split]` gets --- one extra dimension. For example, if `split_dim = 1` and `num_split = --- 2` and the input is --- --- input_tensor = shape = [2, 7] [ a d e ] [b c ] --- --- Graphically the output tensors are: --- --- output_tensor[0] = shape = [2, 4] [ a ] [b c ] --- --- output_tensor[1] = shape = [2, 3] [ d e ] [ ] -sparseSplit :: (TensorType t) => Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64]) -sparseSplit' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64]) - --- | Adds up a SparseTensor and a dense Tensor, producing a --- dense Tensor. --- --- This Op does not require a_indices be sorted in standard --- lexicographic order. 
-sparseTensorDenseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t -sparseTensorDenseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t - --- | Multiply SparseTensor (of rank 2) A by dense matrix B. --- --- No validity checking is performed on the indices of A. However, the --- following input format is recommended for optimal behavior: --- --- if adjoint_a == false: A should be sorted in lexicographically --- increasing order. Use SparseReorder if you're not sure. if adjoint_a --- == true: A should be sorted in order of increasing dimension 1 (i.e., --- "column major" order instead of "row major" order). -sparseTensorDenseMatMul :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t -sparseTensorDenseMatMul' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t - --- | Converts a sparse representation into a dense tensor. --- --- Builds an array dense with shape output_shape such --- that --- --- ```prettyprint # If sparse_indices is scalar dense[i] = (i == --- sparse_indices ? sparse_values : default_value) --- --- # If sparse_indices is a vector, then for each i --- dense[sparse_indices[i]] = sparse_values[i] --- --- # If sparse_indices is an n by d matrix, then for each i in [0, n) --- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = --- sparse_values[i] ``` --- --- All other values in dense are set to default_value. --- If sparse_values is a scalar, all sparse indices are set to --- this single value. 
--- --- Indices should be sorted in lexicographic order, and indices must not --- contain any repeats. If validate_indices is true, these --- properties are checked during execution. -sparseToDense :: (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t -sparseToDense' :: (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t - --- | Applies set operation along last dimension of 2 SparseTensor --- inputs. --- --- See SetOperationOp::SetOperationFromContext for values of --- set_operation. --- --- If validate_indices is True, --- SparseToSparseSetOperation validates the order and range of --- set1 and set2 indices. --- --- Input set1 is a SparseTensor represented by --- set1_indices, set1_values, and set1_shape. --- For set1 ranked n, 1st `n-1` dimensions must be the --- same as set2. Dimension n contains values in a set, --- duplicates are allowed but ignored. --- --- Input set2 is a SparseTensor represented by --- set2_indices, set2_values, and set2_shape. --- For set2 ranked n, 1st `n-1` dimensions must be the --- same as set1. Dimension n contains values in a set, --- duplicates are allowed but ignored. --- --- If validate_indices is True, this op validates the --- order and range of set1 and set2 indices. --- --- Output result is a SparseTensor represented by --- result_indices, result_values, and --- result_shape. For set1 and set2 ranked --- n, this has rank n and the same 1st `n-1` dimensions --- as set1 and set2. The nth dimension --- contains the result of set_operation applied to the --- corresponding `[0...n-1]` dimension of set. 
-sparseToSparseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -sparseToSparseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) - --- | Splits a tensor into num_split tensors along one dimension. -split :: (TensorType t) => Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t] -split' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t] - --- | Splits a tensor into num_split tensors along one dimension. -splitV :: (TensorType t, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t] -splitV' :: (TensorType t, OneOf '[Int32, Int64] tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t] - --- | Computes square root of x element-wise. --- --- I.e., \(y = sqrt{x} = x^{1/2}\). -sqrt :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -sqrt' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the gradient for the sqrt of x wrt its input. --- --- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and --- dy is the corresponding input gradient. -sqrtGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -sqrtGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes square of x element-wise. 
--- --- I.e., \(y = x * x = x^2\). -square :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -square' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns (x - y)(x - y) element-wise. --- ---
--- *NOTE*: SquaredDifference supports broadcasting. More about
--- broadcasting here
                                -squaredDifference :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -squaredDifference' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Removes dimensions of size 1 from the shape of a tensor. --- --- Given a tensor input, this operation returns a tensor of the --- same type with all dimensions of size 1 removed. If you don't want to --- remove all size 1 dimensions, you can remove specific size 1 --- dimensions by specifying squeeze_dims. --- --- For example: --- --- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] --- shape(squeeze(t)) ==> [2, 3] ``` --- --- Or, to remove specific size 1 dimensions: --- --- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] --- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] ``` -squeeze :: (TensorType t) => Tensor v'1 t -> Tensor Build t -squeeze' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | A stack that produces elements in first-in last-out order. -stack :: (MonadBuild m') => DataType -> m' (Tensor Ref ByteString) -stack' :: (MonadBuild m') => OpParams -> DataType -> m' (Tensor Ref ByteString) - --- | Delete the stack from its resource container. -stackClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) -stackClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) - --- | Pop the element at the top of the stack. -stackPop :: (MonadBuild m', TensorType elem_type) => Tensor Ref ByteString -> m' (Tensor Value elem_type) -stackPop' :: (MonadBuild m', TensorType elem_type) => OpParams -> Tensor Ref ByteString -> m' (Tensor Value elem_type) - --- | Push an element onto the stack. 
-stackPush :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t) -stackPush' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t) - --- | Stage values similar to a lightweight Enqueue. The basic functionality --- of this --- --- Op is similar to a queue with many fewer capabilities and options. --- This Op is optimized for performance. -stage :: (MonadBuild m', TensorTypes dtypes) => TensorList (v'1) dtypes -> m' (ControlNode) -stage' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -> m' (ControlNode) - --- | Stops gradient computation. --- --- When executed in a graph, this op outputs its input tensor as-is. --- --- When building ops to compute gradients, this op prevents the --- contribution of its inputs to be taken into account. Normally, the --- gradient generator adds ops to a graph to compute the derivatives of a --- specified loss by recursively finding out inputs that --- contributed to its computation. If you insert this op in the graph it --- inputs are masked from the gradient generator. They are not taken into --- account for computing gradients. --- --- This is useful any time you want to compute a value with TensorFlow --- but need to pretend that the value was a constant. Some examples --- include: --- ---
--- * The *EM* algorithm where the *M-step* should not involve
---   backpropagation through the output of the *E-step*.
--- * Contrastive divergence training of Boltzmann machines where, when
---   differentiating the energy function, the training must not
---   backpropagate through the graph that generated the samples from
---   the model.
--- * Adversarial training, where no backprop should happen through the
---   adversarial example generation process.
                                -stopGradient :: (TensorType t) => Tensor v'1 t -> Tensor Build t -stopGradient' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Return a strided slice from input. --- --- Note, most python users will want to use the Python __getitem__ --- or __getitem__ rather than this op directly. --- --- The goal of this op is to produce a new tensor with a subset of the --- elements from the n dimensional input tensor. The --- subset is chosen using a sequence of m sparse range --- specifications encoded into the arguments of this function. Note, in --- some cases m could be equal to n, but this need not --- be the case. Each range specification entry can be one of the --- following: --- ---
--- * An ellipsis (...). Ellipses are used to imply zero or more
---   dimensions of full-dimension selection and are produced using
---   ellipsis_mask. For example, `foo[...]` is the identity slice.
---
--- * A new axis. This is used to insert a new shape=1 dimension and is
---   produced using new_axis_mask. For example, `foo[:, ...]` where
---   foo is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
---
--- * A range `begin:end:stride`. This is used to specify how much to
---   choose from a given dimension. stride can be any integer but 0.
---   begin is an integer which represents the index of the first value
---   to select while end represents the index of the last value to
---   select. The number of values selected in each dimension is
---   `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
---   begin and end can be negative where `-1` is the last element,
---   `-2` is the second to last. begin_mask controls whether to
---   replace the explicitly given begin with an implicit effective
---   value of `0` if `stride > 0` and `-1` if `stride < 0`. end_mask
---   is analogous but produces the number required to create the
---   largest open interval. For example, given a shape `(3,)` tensor
---   `foo[:]`, the effective begin and end are `0` and `3`. Do not
---   assume this is equivalent to `foo[0:-1]` which has an effective
---   begin and end of `0` and `2`. Another example is `foo[-2::-1]`
---   which reverses the first dimension of a tensor while dropping the
---   last two (in the original order elements). For example
---   `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
---
--- * A single index. This is used to keep only elements that have a
---   given index. For example (`foo[2, :]` on a shape `(5,6)` tensor
---   produces a shape `(6,)` tensor. This is encoded in begin and end
---   and shrink_axis_mask.
                                --- --- Each conceptual range specification is encoded in the op's argument. --- This encoding is best understand by considering a non-trivial example. --- In particular, `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as --- --- ```prettyprint begin = [1, 2, x, x, 0, x] # x denotes don't care --- (usually 0) end = [2, 4, x, x, -3, x] strides = [1, 1, x, x, -1, 1] --- begin_mask = 1<<4 | 1 << 5 = 48 end_mask = 1<<5 = 32 --- ellipsis_mask = 1<<3 = 8 new_axis_mask = 1<<2 4 --- shrink_axis_mask = 1<<0 ``` --- --- In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of --- the slice becomes (2, 1, 5, 5, 2, 5). Let us walk step by step through --- each argument specification. --- ---
--- 1. The first argument in the example slice is turned into
---    `begin = 1` and `end = begin + 1 = 2`. To disambiguate from the
---    original spec `2:4` we also set the appropriate bit in
---    shrink_axis_mask.
--- 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks
---    have zero bits contributed.
--- 3. None is a synonym for `tf.newaxis`. This means insert a
---    dimension of size 1 dimension in the final shape. Dummy values
---    are contributed to begin, end and stride, while the
---    new_axis_mask bit is set.
--- 4. ... grab the full ranges from as many dimensions as needed to
---    fully specify a slice for every dimension of the input shape.
--- 5. `:-3:-1` shows the use of negative indices. A negative index i
---    associated with a dimension that has shape s is converted to a
---    positive index `s + i`. So `-1` becomes `s-1` (i.e. the last
---    element). This conversion is done internally so begin, end and
---    strides receive x, -3, and -1. The appropriate begin_mask bit is
---    set to indicate the start range is the full range (ignoring the
---    x).
--- 6. : indicates that the entire contents of the corresponding
---    dimension is selected. This is equivalent to `::` or `0::1`.
---    begin, end, and strides receive 0, 0, and 1, respectively. The
---    appropriate bits in begin_mask and end_mask are also set.
---
--- *Requirements*: `0 != strides[i] for i in [0, m)` `ellipsis_mask
--- must be a power of two (only one ellipsis)`
                                -stridedSlice :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t -stridedSlice' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t - --- | Assign value to the sliced l-value reference of ref. --- --- The values of value are assigned to the positions in the --- variable ref that are selected by the slice parameters. The --- slice parameters `begin, end, strides, etc. work --- exactly as in StridedSlice. --- --- NOTE this op currently does not support broadcasting and so --- value's shape must be exactly the shape produced by the slice --- of ref. -stridedSliceAssign :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) => Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t) -stridedSliceAssign' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t) - --- | Returns the gradient of StridedSlice. --- --- Since StridedSlice cuts out pieces of its input --- which is size shape, its gradient will have the same shape --- (which is passed here as shape). The gradient will be zero in --- any element that the slice does not select. --- --- Arguments are the same as StridedSliceGrad with the exception that --- dy is the input gradient to be propagated and shape is --- the shape of StridedSlice's input. 
-stridedSliceGrad :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t -stridedSliceGrad' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t - --- | Joins the strings in the given list of string tensors into one tensor; --- --- with the given separator (default is an empty separator). -stringJoin :: [Tensor v'1 ByteString] -> Tensor Build ByteString -stringJoin' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString - --- | Split elements of input based on delimiter into a --- SparseTensor. --- --- Let N be the size of source (typically N will be the batch size). --- Split each element of input based on delimiter and --- return a SparseTensor containing the splitted tokens. Empty --- tokens are ignored. --- --- delimiter can be empty, or a string of split characters. If --- delimiter is an empty string, each element of input --- is split into individual single-byte character strings, including --- splitting of UTF-8 multibyte sequences. Otherwise every character of --- delimiter is a potential split point. --- --- For example: N = 2, input[0] is 'hello world' and input[1] is 'a b c', --- then the output will be --- --- indices = [0, 0; 0, 1; 1, 0; 1, 1; 1, 2] shape = [2, 3] values = --- [hello, world, a, b, c] -stringSplit :: Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64) -stringSplit' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64) - --- | Converts each string in the input Tensor to its hash mod by a number --- of buckets. --- --- The hash function is deterministic on the content of the string within --- the process. 
--- --- Note that the hash function may change from time to time. This --- functionality will be deprecated and it's recommended to use --- `tf.string_to_hash_bucket_fast()` or --- `tf.string_to_hash_bucket_strong()`. -stringToHashBucket :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 -stringToHashBucket' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 - --- | Converts each string in the input Tensor to its hash mod by a number --- of buckets. --- --- The hash function is deterministic on the content of the string within --- the process and will never change. However, it is not suitable for --- cryptography. This function may be used when CPU time is scarce and --- inputs are trusted or unimportant. There is a risk of adversaries --- constructing inputs that all hash to the same bucket. To prevent this --- problem, use a strong hash function with --- `tf.string_to_hash_bucket_strong`. -stringToHashBucketFast :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 -stringToHashBucketFast' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 - --- | Converts each string in the input Tensor to its hash mod by a number --- of buckets. --- --- The hash function is deterministic on the content of the string within --- the process. The hash function is a keyed hash function, where --- attribute key defines the key of the hash function. --- key is an array of 2 elements. --- --- A strong hash is important when inputs may be malicious, e.g. URLs --- with additional components. Adversaries could try to make their inputs --- hash to the same bucket for a denial-of-service attack or to skew the --- results. A strong hash prevents this by making it dificult, if not --- infeasible, to compute inputs that hash to the same bucket. This comes --- at a cost of roughly 4x higher compute time than --- `tf.string_to_hash_bucket_fast`. 
-stringToHashBucketStrong :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 -stringToHashBucketStrong' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 - --- | Converts each string in the input Tensor to the specified numeric --- type. --- --- (Note that int32 overflow results in an error while float overflow --- results in a rounded value.) -stringToNumber :: (OneOf '[Int32, Float] out_type) => Tensor v'1 ByteString -> Tensor Build out_type -stringToNumber' :: (OneOf '[Int32, Float] out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type - --- | Returns x - y element-wise. --- ---
                                  ---
                                • NOTE*: Sub supports broadcasting. More about broadcasting --- here
                                • ---
                                -sub :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -sub' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Return substrings from Tensor of strings. --- --- For each string in the input Tensor, creates a substring --- starting at index pos with a total length of len. --- --- If len defines a substring that would extend beyond the --- length of the input string, then as many characters as possible are --- used. --- --- If pos is negative or specifies a character index larger than --- any of the input strings, then an InvalidArgumentError is --- thrown. --- --- pos and len must have the same shape, otherwise a --- ValueError is thrown on Op creation. --- ---
                                  ---
                                • NOTE*: Substr supports broadcasting up to two dimensions. --- More about broadcasting here
                                • ---
                                • --
                                • ---
                                --- --- Examples --- --- Using scalar pos and len: --- --- ``` input = [bHello, bWorld] position = 1 length = 3 --- --- output = [bell, borl] ``` --- --- Using pos and len with same shape as input: --- --- ``` input = [[bten, beleven, btwelve], --- [bthirteen, bfourteen, bfifteen], --- [bsixteen, bseventeen, beighteen]] position --- = [[1, 2, 3], [1, 2, 3], [1, 2, 3]] length = [[2, 3, 4], [4, 3, 2], --- [5, 5, 5]] --- --- output = [[ben, beve, blve], --- [bhirt, burt, bte], [bixtee, --- bvente, bhteen]] ``` --- --- Broadcasting pos and len onto input: --- --- ``` input = [[bten, beleven, btwelve], --- [bthirteen, bfourteen, bfifteen], --- [bsixteen, bseventeen, beighteen], --- [bnineteen, btwenty, btwentyone]] position --- = [1, 2, 3] length = [1, 2, 3] --- --- output = [[be, bev, blve], [bh, --- bur, btee], [bi, bve, --- bhte], [bi, ben, bnty]] ``` --- --- Broadcasting input onto pos and len: --- --- ``` input = bthirteen position = [1, 5, 7] length = [3, 2, 1] --- --- output = [bhir, bee, b'n"] ``` -substr :: (OneOf '[Int32, Int64] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString -substr' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString - --- | Computes the sum of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. 
-sum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -sum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Computes the singular value decompositions of one or more matrices. --- --- Computes the SVD of each inner matrix in input such that --- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * --- transpose(v[..., :, :])` --- --- ```prettyprint # a is a tensor containing a batch of matrices. # s is --- a tensor of singular values for each matrix. # u is the tensor --- containing of left singular vectors for each matrix. # v is the tensor --- containing of right singular vectors for each matrix. s, u, v = svd(a) --- s, _, _ = svd(a, compute_uv=False) ``` -svd :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) -svd' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) - --- | Forwards `data` to the output port determined by pred. --- --- If pred is true, the `data` input is forwarded to --- output_true. Otherwise, the data goes to --- output_false. --- --- See also RefSwitch and Merge. -switch :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t) -switch' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t) - --- | A Reader that outputs the records from a TensorFlow Records file. -tFRecordReader :: (MonadBuild m') => m' (Tensor Ref ByteString) -tFRecordReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) - --- | A Reader that outputs the records from a TensorFlow Records file. 
-tFRecordReaderV2 :: (MonadBuild m') => m' (ResourceHandle) -tFRecordReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) - --- | Read SparseTensors from a SparseTensorsMap and --- concatenate them. --- --- The input sparse_handles must be an int64 matrix of --- shape `[N, 1]` where N is the minibatch size and the rows --- correspond to the output handles of AddSparseToTensorsMap or --- AddManySparseToTensorsMap. The ranks of the original --- SparseTensor objects that went into the given input ops must --- all match. When the final SparseTensor is created, it has --- rank one higher than the ranks of the incoming SparseTensor --- objects (they have been concatenated along a new row dimension on the --- left). --- --- The output SparseTensor object's shape values for all --- dimensions but the first are the max across the input --- SparseTensor objects' shape values for the corresponding --- dimensions. Its first shape value is N, the minibatch size. --- --- The input SparseTensor objects' indices are assumed ordered --- in standard lexicographic order. If this is not the case, after this --- step run SparseReorder to restore index ordering. --- --- For example, if the handles represent an input, which is a `[2, 3]` --- matrix representing two original SparseTensor objects: --- --- ``` index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] ``` --- --- and --- --- ``` index = [ 2] [10] values = [4, 5] shape = [30] ``` --- --- then the final SparseTensor will be: --- --- ``` index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] --- shape = [2 50] ``` -takeManySparseFromTensorsMap :: (MonadBuild m', TensorType dtype) => Tensor v'1 Int64 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) -takeManySparseFromTensorsMap' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Int64 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) - --- | Computes tan of x element-wise. 
-tan :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -tan' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes hyperbolic tangent of x element-wise. -tanh :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t -tanh' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes the gradient for the tanh of x wrt its input. --- --- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and --- dy is the corresponding input gradient. -tanhGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -tanhGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Returns a tensor that may be mutated, but only persists within a --- single step. --- --- This is an experimental op for internal use only and it is possible to --- use this op in unsafe ways. DO NOT USE unless you fully understand the --- risks. --- --- It is the caller's responsibility to ensure that ref is --- eventually passed to a matching DestroyTemporaryVariable op --- after all other uses have completed. --- --- Outputs a ref to the tensor state so it may be read or modified. --- --- E.g. 
var = state_ops._temporary_variable([1, 2], types.float_) --- var_name = var.op.name var = state_ops.assign(var, [[4.0, 5.0]]) var = --- state_ops.assign_add(var, [[6.0, 7.0]]) final = --- state_ops._destroy_temporary_variable(var, var_name=var_name) -temporaryVariable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) -temporaryVariable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) -tensorArray :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString) -tensorArray' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString) -tensorArrayClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) -tensorArrayClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) - --- | Deprecated. Use TensorArrayCloseV3 -tensorArrayCloseV2 :: (MonadBuild m') => Tensor v'1 ByteString -> m' (ControlNode) -tensorArrayCloseV2' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> m' (ControlNode) - --- | Delete the TensorArray from its resource container. This enables --- --- the user to close and release the resource in the middle of a --- step/run. -tensorArrayCloseV3 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode) -tensorArrayCloseV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode) -tensorArrayConcat :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) -tensorArrayConcat' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) - --- | Deprecated. 
Use TensorArrayConcatV3 -tensorArrayConcatV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64) -tensorArrayConcatV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64) - --- | Concat the elements from the TensorArray into value value. --- --- Takes T elements of shapes --- --- ``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 --- x ...) ``` --- --- and concatenates them into a Tensor of shape: --- --- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` --- --- All elements must have the same shape (excepting the first dimension). -tensorArrayConcatV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) -tensorArrayConcatV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) -tensorArrayGather :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) -tensorArrayGather' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) - --- | Deprecated. Use TensorArrayGatherV3 -tensorArrayGatherV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype -tensorArrayGatherV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype - --- | Gather specific elements from the TensorArray into output --- value. --- --- All elements selected by indices must have the same shape. 
-tensorArrayGatherV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) -tensorArrayGatherV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) -tensorArrayGrad :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString) -tensorArrayGrad' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString) - --- | Deprecated. Use TensorArrayGradV3 -tensorArrayGradV2 :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString) -tensorArrayGradV2' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString) - --- | Creates a TensorArray for storing the gradients of values in the given --- handle. --- --- If the given TensorArray gradient already exists, returns a reference --- to it. --- --- Locks the size of the original TensorArray by disabling its dynamic --- size flag. --- ---
                                  ---
                                • *A note about the input flow_in:**
                                • ---
                                --- --- The handle flow_in forces the execution of the gradient lookup to --- occur only after certain other operations have occurred. For example, --- when the forward TensorArray is dynamically sized, writes to this --- TensorArray may resize the object. The gradient TensorArray is --- statically sized based on the size of the forward TensorArray when --- this operation executes. Furthermore, the size of the forward --- TensorArray is frozen by this call. As a result, the flow is used to --- ensure that the call to generate the gradient TensorArray only happens --- after all writes are executed. --- --- In the case of dynamically sized TensorArrays, gradient computation --- should only be performed on read operations that have themselves been --- chained via flow to occur only after all writes have executed. That --- way the final size of the forward TensorArray is known when this --- operation is called. --- ---
                                  ---
                                • *A note about the source attribute:**
                                • ---
                                --- --- TensorArray gradient calls use an accumulator TensorArray object. If --- multiple gradients are calculated and run in the same session, the --- multiple gradient nodes may accidentally flow throuth the same --- accumulator TensorArray. This double counts and generally breaks the --- TensorArray gradient flow. --- --- The solution is to identify which gradient call this particular --- TensorArray gradient is being called in. This is performed by --- identifying a unique string (e.g. "gradients", "gradients_1", ...) --- from the input gradient Tensor's name. This string is used as a suffix --- when creating the TensorArray gradient object here (the attribute --- source). --- --- The attribute source is added as a suffix to the forward --- TensorArray's name when performing the creation / lookup, so that each --- separate gradient calculation gets its own TensorArray accumulator. -tensorArrayGradV3 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 Float -> m' ((ResourceHandle, Tensor Value Float)) -tensorArrayGradV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' ((ResourceHandle, Tensor Value Float)) -tensorArrayPack :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype) -tensorArrayPack' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype) -tensorArrayRead :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) -tensorArrayRead' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) - --- | Deprecated. 
Use TensorArrayReadV3 -tensorArrayReadV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype -tensorArrayReadV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype - --- | Read an element from the TensorArray into output value. -tensorArrayReadV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) -tensorArrayReadV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) -tensorArrayScatter :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) -tensorArrayScatter' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) - --- | Deprecated. Use TensorArrayScatterV3 -tensorArrayScatterV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float -tensorArrayScatterV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float - --- | Scatter the data from the input value into specific TensorArray --- elements. --- --- indices must be a vector, its length must match the first dim --- of value. 
-tensorArrayScatterV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) -tensorArrayScatterV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) -tensorArraySize :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32) -tensorArraySize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32) - --- | Deprecated. Use TensorArraySizeV3 -tensorArraySizeV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32 -tensorArraySizeV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32 - --- | Get the current size of the TensorArray. -tensorArraySizeV3 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32) -tensorArraySizeV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32) -tensorArraySplit :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) -tensorArraySplit' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) - --- | Deprecated. Use TensorArraySplitV3 -tensorArraySplitV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float -tensorArraySplitV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float - --- | Split the data from the input value into TensorArray elements. --- --- Assuming that lengths takes on values --- --- ```(n0, n1, ..., n(T-1))``` --- --- and that value has shape --- --- ```(n0 + n1 + ... 
+ n(T-1) x d0 x d1 x ...)```, --- --- this splits values into a TensorArray with T tensors. --- --- TensorArray index t will be the subtensor of values with starting --- position --- --- ```(n0 + n1 + ... + n(t-1), 0, 0, ...)``` --- --- and having size --- --- ```nt x d0 x d1 x ...``` -tensorArraySplitV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) -tensorArraySplitV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) -tensorArrayUnpack :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float) -tensorArrayUnpack' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float) - --- | Deprecated. Use TensorArrayV3 -tensorArrayV2 :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString) -tensorArrayV2' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString) - --- | An array of Tensors of given size, with data written via Write and --- read --- --- via Read or Pack. -tensorArrayV3 :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' ((ResourceHandle, Tensor Value Float)) -tensorArrayV3' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' ((ResourceHandle, Tensor Value Float)) -tensorArrayWrite :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) -tensorArrayWrite' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) - --- | Deprecated. 
Use TensorArrayGradV3 -tensorArrayWriteV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float -tensorArrayWriteV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float - --- | Push an element onto the tensor_array. -tensorArrayWriteV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) -tensorArrayWriteV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) - --- | Outputs a Summary protocol buffer with a tensor. -tensorSummary :: (TensorType t) => Tensor v'1 t -> Tensor Build ByteString -tensorSummary' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString - --- | A Reader that outputs the lines of a file delimited by '\n'. -textLineReader :: (MonadBuild m') => m' (Tensor Ref ByteString) -textLineReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) - --- | A Reader that outputs the lines of a file delimited by '\n'. -textLineReaderV2 :: (MonadBuild m') => m' (ResourceHandle) -textLineReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) - --- | Generates labels for candidate sampling with a learned unigram --- distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. 
-threadUnsafeUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -threadUnsafeUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) - --- | Constructs a tensor by tiling a given tensor. --- --- This operation creates a new tensor by replicating input --- multiples times. The output tensor's i'th dimension has --- `input.dims(i) * multiples[i]` elements, and the values of --- input are replicated `multiples[i]` times along the --- ith dimension. For example, tiling `[a b c d]` by `[2]` --- produces `[a b c d a b c d]`. -tile :: (TensorType t, OneOf '[Int32, Int64] tmultiples) => Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t -tile' :: (TensorType t, OneOf '[Int32, Int64] tmultiples) => OpParams -> Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t - --- | Returns the gradient of Tile. --- --- Since Tile takes an input and repeats the input --- multiples times along each dimension, TileGrad takes --- in multiples and aggregates each repeated tile of --- input into output. -tileGrad :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t -tileGrad' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t - --- | Finds values and indices of the k largest elements for the --- last dimension. --- --- If the input is a vector (rank-1), finds the k largest --- entries in the vector and outputs their values and indices as vectors. --- Thus `values[j]` is the j-th largest entry in input, --- and its index is `indices[j]`. --- --- For matrices (resp. higher rank input), computes the top k --- entries in each row (resp. vector along the last dimension). Thus, --- --- values.shape = indices.shape = input.shape[:-1] + [k] --- --- If two elements are equal, the lower-index element appears first. 
--- --- If k varies dynamically, use TopKV2 below. -topK :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32) -topK' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32) - --- | Finds values and indices of the k largest elements for the --- last dimension. --- --- If the input is a vector (rank-1), finds the k largest --- entries in the vector and outputs their values and indices as vectors. --- Thus `values[j]` is the j-th largest entry in input, --- and its index is `indices[j]`. --- --- For matrices (resp. higher rank input), computes the top k --- entries in each row (resp. vector along the last dimension). Thus, --- --- values.shape = indices.shape = input.shape[:-1] + [k] --- --- If two elements are equal, the lower-index element appears first. -topKV2 :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32) -topKV2' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32) - --- | Shuffle dimensions of x according to a permutation. --- --- The output y has the same rank as x. The shapes of --- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for --- i in [0, 1, ..., rank(x) - 1]` -transpose :: (TensorType t, OneOf '[Int32, Int64] tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t -transpose' :: (TensorType t, OneOf '[Int32, Int64] tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t - --- | Returns x / y element-wise for integer types. --- --- Truncation designates that negative numbers will round fractional --- quantities toward zero. I.e. -7 / 5 = 1. This matches C semantics but --- it is different than Python semantics. 
See FloorDiv for a --- division function that matches Python Semantics. --- ---
                                  ---
                                • NOTE*: TruncateDiv supports broadcasting. More about --- broadcasting here
                                • ---
                                -truncateDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -truncateDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Returns element-wise remainder of division. This emulates C semantics --- where --- --- true, this follows C semantics in that the result here is consistent --- with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. --- ---
                                  ---
                                • NOTE*: Mod supports broadcasting. More about broadcasting --- here
                                • ---
                                -truncateMod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -truncateMod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Outputs random values from a truncated normal distribution. --- --- The generated values follow a normal distribution with mean 0 and --- standard deviation 1, except that values whose magnitude is more than --- 2 standard deviations from the mean are dropped and re-picked. -truncatedNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype) -truncatedNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype) - --- | Generates labels for candidate sampling with a uniform distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -uniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -uniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) - --- | Finds unique elements in a 1-D tensor. --- --- This operation returns a tensor y containing all of the --- unique elements of x sorted in the same order that they occur --- in x. This operation also returns a tensor idx the --- same size as x that contains the index of each value of --- x in the unique output y. 
In other words: --- --- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` --- --- For example: --- --- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, --- idx = unique(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, --- 3, 4, 4] ``` -unique :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx) -unique' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx) - --- | Finds unique elements in a 1-D tensor. --- --- This operation returns a tensor y containing all of the --- unique elements of x sorted in the same order that they occur --- in x. This operation also returns a tensor idx the --- same size as x that contains the index of each value of --- x in the unique output y. Finally, it returns a --- third tensor count that contains the count of each element of --- y in x. In other words: --- --- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` --- --- For example: --- --- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, --- idx, count = unique_with_counts(x) y ==> [1, 2, 4, 7, 8] idx ==> --- [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==> [2, 1, 3, 1, 2] ``` -uniqueWithCounts :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) -uniqueWithCounts' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) - --- | Unpacks a given dimension of a rank-R tensor into --- num rank-`(R-1)` tensors. --- --- Unpacks num tensors from value by chipping it along --- the axis dimension. For example, given a tensor of shape `(A, --- B, C, D)`; --- --- If `axis == 0` then the i'th tensor in output is the slice --- `value[i, :, :, :]` and each tensor in output will have shape --- `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike --- split). 
--- --- If `axis == 1` then the i'th tensor in output is the slice --- `value[:, i, :, :]` and each tensor in output will have shape --- `(A, C, D)`. Etc. --- --- This is the opposite of pack. -unpack :: (TensorType t) => Int64 -> Tensor v'1 t -> [Tensor Build t] -unpack' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> [Tensor Build t] - --- | Computes the sum along segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that `(output[i] = sum_{j...} data[j...]` where --- the sum is over tuples `j...` such that `segment_ids[j...] == i`. --- Unlike SegmentSum, segment_ids need not be sorted --- and need not cover all values in the full range of valid values. --- --- If the sum is empty for a given segment ID i, `output[i] = --- 0`. --- --- num_segments should equal the number of distinct segment IDs. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/UnsortedSegmentSum.png" alt /div -unsortedSegmentSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t -unsortedSegmentSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t - --- | Op is similar to a lightweight Dequeue. The basic funtionality is --- similar to --- --- dequeue with many fewer capabilities and options. This Op is optimized --- for performance. -unstage :: (MonadBuild m', TensorTypes dtypes) => m' (TensorList (Value) dtypes) -unstage' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList (Value) dtypes) - --- | Creates a handle to a Variable resource. 
-varHandleOp :: (MonadBuild m') => DataType -> Shape -> m' (ResourceHandle) -varHandleOp' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (ResourceHandle) - --- | Checks whether a resource handle-based variable has been initialized. -varIsInitializedOp :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Bool) -varIsInitializedOp' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Bool) - --- | Use VariableV2 instead. -variable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) -variable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) - --- | Holds state in the form of a tensor that persists across steps. --- --- Outputs a ref to the tensor state so it may be read or modified. --- TODO(zhifengc/mrry): Adds a pointer to a more detail document about --- sharing states in tensorflow. -variableV2 :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) -variableV2' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) - --- | Returns locations of true values in a boolean tensor. --- --- This operation returns the coordinates of true elements in --- input. The coordinates are returned in a 2-D tensor where the --- first dimension (rows) represents the number of true elements, and the --- second dimension (columns) represents the coordinates of the true --- elements. Keep in mind, the shape of the output tensor can vary --- depending on how many true values there are in input. Indices --- are output in row-major order. --- --- For example: --- --- ```prettyprint # input tensor is [[True, False] # [True, --- False]] # input has two true values, so output has two --- coordinates. # input has rank of 2, so coordinates have two --- indices. 
where(input) ==> [[0, 0], [1, 0]] --- --- # input tensor is [[[True, False] # [True, False]] # [[False, --- True] # [False, True]] # [[False, False] # [False, True]]] # --- input has 5 true values, so output has 5 coordinates. # --- input has rank of 3, so coordinates have three indices. --- where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, --- 1, 1]] ``` -where' :: Tensor v'1 Bool -> Tensor Build Int64 -where'' :: OpParams -> Tensor v'1 Bool -> Tensor Build Int64 - --- | A Reader that outputs the entire contents of a file as a value. --- --- To use, enqueue filenames in a Queue. The output of ReaderRead will be --- a filename (key) and the contents of that file (value). -wholeFileReader :: (MonadBuild m') => m' (Tensor Ref ByteString) -wholeFileReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) - --- | A Reader that outputs the entire contents of a file as a value. --- --- To use, enqueue filenames in a Queue. The output of ReaderRead will be --- a filename (key) and the contents of that file (value). -wholeFileReaderV2 :: (MonadBuild m') => m' (ResourceHandle) -wholeFileReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) - --- | Writes contents to the file at input filename. Creates file if not --- existing. -writeFile :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) -writeFile' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) - --- | Returns a tensor of zeros with the same shape and type as x. -zerosLike :: (TensorType t) => Tensor v'1 t -> Tensor Build t -zerosLike' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Compute the Hurwitz zeta function \(zeta(x, q)\). 
--- --- The Hurwitz zeta function is defined as: --- --- ``` zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x} ``` -zeta :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -zeta' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | A graph node which represents an argument to a function. -_Arg :: (MonadBuild m', TensorType t) => Int64 -> m' (Tensor Value t) -_Arg' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> m' (Tensor Value t) - --- | Converts an array of tensors to a list of tensors. -_ArrayToList :: (TensorType t, TensorTypes out_types) => [Tensor v'1 t] -> TensorList (Build) out_types -_ArrayToList' :: (TensorType t, TensorTypes out_types) => OpParams -> [Tensor v'1 t] -> TensorList (Build) out_types - --- | Cast x of type SrcT to y of DstT. --- --- _HostCast requires its input and produces its output in host memory. -_HostCast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT -_HostCast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT - --- | Receives the named tensor from send_device on recv_device. --- --- _HostRecv requires its input on host memory whereas _Recv requires its --- input on device memory. -_HostRecv :: (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type) -_HostRecv' :: (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type) - --- | Sends the named tensor from send_device to recv_device. --- --- _HostSend requires its input on host memory whereas _Send requires its --- input on device memory. -_HostSend :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) -_HostSend' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode) - --- | Converts a list of tensors to an array of tensors. 
-_ListToArray :: (TensorTypes tin, TensorType t) => Int64 -> TensorList (v'1) tin -> [Tensor Build t] -_ListToArray' :: (TensorTypes tin, TensorType t) => OpParams -> Int64 -> TensorList (v'1) tin -> [Tensor Build t] - --- | Creates an empty Tensor with shape shape and type --- dtype. --- --- The memory can optionally be initialized. This is usually useful in --- conjunction with inplace operations. -_ParallelConcatStart :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Value dtype) -_ParallelConcatStart' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Value dtype) - --- | Updates input value at loc with update. --- --- If you use this function you will almost certainly want to add a --- control dependency as done in the implementation of parallel_stack to --- avoid race conditions. -_ParallelConcatUpdate :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -_ParallelConcatUpdate' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Receives the named tensor from send_device on recv_device. -_Recv :: (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type) -_Recv' :: (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type) - --- | A graph node which represents a return value of a function. -_Retval :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) -_Retval' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode) - --- | Sends the named tensor from send_device to recv_device. 
-_Send :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) -_Send' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode) diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/LICENSE b/docs/haddock/tensorflow-logging-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/TensorFlow-Logging.html b/docs/haddock/tensorflow-logging-0.1.0.0/TensorFlow-Logging.html index 17fa71a..b7f04e8 100644 --- a/docs/haddock/tensorflow-logging-0.1.0.0/TensorFlow-Logging.html +++ b/docs/haddock/tensorflow-logging-0.1.0.0/TensorFlow-Logging.html @@ -1,7 +1,7 @@ -TensorFlow.Logging

                                tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Logging

                                Description

                                TensorBoard Summary generation. Provides type safe wrappers around raw +

                                tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Logging

                                Description

                                TensorBoard Summary generation. Provides type safe wrappers around raw string emitting CoreOps.

                                Example use:

                                -- Call summary functions while constructing the graph.
                                 createModel = do
                                   loss <- -- ...
                                @@ -16,6 +16,6 @@ train = TF.withEventWriter "/path/to/logs" $ \eventWriter -> do
                                                 ((), summaryBytes) <- TF.run (trainStep, summaryTensor)
                                                 let summary = decodeMessageOrDie (TF.unScalar summaryBytes)
                                                 TF.logSummary eventWriter step summary
                                -            else TF.run_ trainStep

                                Documentation

                                data EventWriter

                                Handle for logging TensorBoard events safely from multiple threads.

                                withEventWriter

                                Arguments

                                :: (MonadIO m, MonadMask m) 
                                => FilePath

                                logdir. Local filesystem directory where event file will be written.

                                -> (EventWriter -> m a) 
                                -> m a 

                                Writes Event protocol buffers to event files.

                                logEvent :: MonadIO m => EventWriter -> Event -> m ()

                                Logs the given Event protocol buffer.

                                logSummary :: MonadIO m => EventWriter -> Int64 -> Summary -> m ()

                                Logs the given Summary event with an optional global step (use 0 if not - applicable).

                                type SummaryTensor = Tensor Value ByteString

                                Synonym for the tensors that return serialized Summary proto.

                                histogramSummary :: (MonadBuild m, TensorType t, t /= ByteString, t /= Bool) => ByteString -> Tensor v t -> m ()

                                Adds a histogramSummary node. The tag argument is intentionally - limited to a single value for simplicity.

                                scalarSummary :: (TensorType t, t /= ByteString, t /= Bool, MonadBuild m) => ByteString -> Tensor v t -> m ()

                                Adds a scalarSummary node.

                                mergeAllSummaries :: MonadBuild m => m SummaryTensor

                                Merge all summaries accumulated in the Build into one summary.

                                \ No newline at end of file + else TF.run_ trainStep

                                Documentation

                                data EventWriter Source #

                                Handle for logging TensorBoard events safely from multiple threads.

                                withEventWriter Source #

                                Arguments

                                :: (MonadIO m, MonadMask m) 
                                => FilePath

                                logdir. Local filesystem directory where event file will be written.

                                -> (EventWriter -> m a) 
                                -> m a 

                                Writes Event protocol buffers to event files.

                                logEvent :: MonadIO m => EventWriter -> Event -> m () Source #

                                Logs the given Event protocol buffer.

                                logGraph :: MonadIO m => EventWriter -> Build a -> m () Source #

                                Logs the graph for the given Build action.

                                logSummary :: MonadIO m => EventWriter -> Int64 -> Summary -> m () Source #

                                Logs the given Summary event with an optional global step (use 0 if not + applicable).

                                type SummaryTensor = Tensor Value ByteString #

                                Synonym for the tensors that return serialized Summary proto.

                                histogramSummary :: (MonadBuild m, TensorType t, t /= ByteString, t /= Bool) => ByteString -> Tensor v t -> m () Source #

                                Adds a histogramSummary node. The tag argument is intentionally + limited to a single value for simplicity.

                                mergeAllSummaries :: MonadBuild m => m SummaryTensor Source #

                                Merge all summaries accumulated in the Build into one summary.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-logging-0.1.0.0/doc-index.html index 08e92a2..caeab90 100644 --- a/docs/haddock/tensorflow-logging-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-logging-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-logging-0.1.0.0: TensorBoard related functionality. (Index)

                                tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                Index

                                EventWriterTensorFlow.Logging
                                histogramSummaryTensorFlow.Logging
                                logEventTensorFlow.Logging
                                logSummaryTensorFlow.Logging
                                mergeAllSummariesTensorFlow.Logging
                                scalarSummaryTensorFlow.Logging
                                SummaryTensorTensorFlow.Logging
                                withEventWriterTensorFlow.Logging
                                \ No newline at end of file +

                                tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                Index

                                EventWriterTensorFlow.Logging
                                histogramSummaryTensorFlow.Logging
                                logEventTensorFlow.Logging
                                logGraphTensorFlow.Logging
                                logSummaryTensorFlow.Logging
                                mergeAllSummariesTensorFlow.Logging
                                scalarSummaryTensorFlow.Logging
                                SummaryTensorTensorFlow.Logging
                                withEventWriterTensorFlow.Logging
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/frames.html b/docs/haddock/tensorflow-logging-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-logging-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-logging-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-logging-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-logging-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-logging-0.1.0.0/index-frames.html deleted file mode 100644 index 0357595..0000000 --- a/docs/haddock/tensorflow-logging-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-logging-0.1.0.0: TensorBoard related functionality. \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/index.html b/docs/haddock/tensorflow-logging-0.1.0.0/index.html index 4a5d4ad..b3eb6fb 100644 --- a/docs/haddock/tensorflow-logging-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-logging-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                Please see README.md

                                Modules

                                \ No newline at end of file +

                                tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                tensorflow-logging-0.1.0.0: TensorBoard related functionality.

                                Please see README.md

                                Modules

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/mini_TensorFlow-Logging.html b/docs/haddock/tensorflow-logging-0.1.0.0/mini_TensorFlow-Logging.html index 67af259..1803221 100644 --- a/docs/haddock/tensorflow-logging-0.1.0.0/mini_TensorFlow-Logging.html +++ b/docs/haddock/tensorflow-logging-0.1.0.0/mini_TensorFlow-Logging.html @@ -1,4 +1,4 @@ -TensorFlow.Logging

                                TensorFlow.Logging

                                \ No newline at end of file +

                                TensorFlow.Logging

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/ocean.css b/docs/haddock/tensorflow-logging-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-logging-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-logging-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ 
-424,30 +435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/src/TensorFlow.Logging.html b/docs/haddock/tensorflow-logging-0.1.0.0/src/TensorFlow.Logging.html new file mode 100644 index 0000000..8c0f39d --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/src/TensorFlow.Logging.html @@ -0,0 +1,170 @@ +
                                -- Copyright 2016 TensorFlow authors.
                                +--
                                +-- Licensed under the Apache License, Version 2.0 (the "License");
                                +-- you may not use this file except in compliance with the License.
                                +-- You may obtain a copy of the License at
                                +--
                                +--     http://www.apache.org/licenses/LICENSE-2.0
                                +--
                                +-- Unless required by applicable law or agreed to in writing, software
                                +-- distributed under the License is distributed on an "AS IS" BASIS,
                                +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                                +-- See the License for the specific language governing permissions and
                                +-- limitations under the License.
                                +
                                +-- | TensorBoard Summary generation. Provides type safe wrappers around raw
                                +-- string emitting CoreOps.
                                +--
                                +-- Example use:
                                +--
                                +-- > -- Call summary functions while constructing the graph.
                                +-- > createModel = do
                                +-- >   loss <- -- ...
                                +-- >   TF.scalarSummary loss
                                +-- >
                                +-- > -- Write summaries to an EventWriter.
                                +-- > train = TF.withEventWriter "/path/to/logs" $ \eventWriter -> do
                                +-- >     summaryTensor <- TF.build TF.allSummaries
                                +-- >     forM_ [1..] $ \step -> do
                                +-- >         if (step % 100 == 0)
                                +-- >             then do
                                +-- >                 ((), summaryBytes) <- TF.run (trainStep, summaryTensor)
                                +-- >                 let summary = decodeMessageOrDie (TF.unScalar summaryBytes)
                                +-- >                 TF.logSummary eventWriter step summary
                                +-- >             else TF.run_ trainStep
                                +
                                +{-# LANGUAGE TypeOperators #-}
                                +
                                +module TensorFlow.Logging
                                +    ( EventWriter
                                +    , withEventWriter
                                +    , logEvent
                                +    , logGraph
                                +    , logSummary
                                +    , SummaryTensor
                                +    , histogramSummary
                                +    , scalarSummary
                                +    , mergeAllSummaries
                                +    ) where
                                +
                                +import Control.Concurrent (forkFinally)
                                +import Control.Concurrent.MVar (MVar, newEmptyMVar, readMVar, putMVar)
                                +import Control.Concurrent.STM (atomically)
                                +import Control.Concurrent.STM.TBMQueue (TBMQueue, newTBMQueueIO, closeTBMQueue, writeTBMQueue)
                                +import Control.Monad.Catch (MonadMask, bracket)
                                +import Control.Monad.IO.Class (MonadIO, liftIO)
                                +import Control.Monad.Trans.Resource (runResourceT)
                                +import Data.ByteString (ByteString)
                                +import Data.Conduit ((=$=))
                                +import Data.Conduit.TQueue (sourceTBMQueue)
                                +import Data.Default (def)
                                +import Data.Int (Int64)
                                +import Data.ProtoLens (encodeMessage)
                                +import Data.Time.Clock (getCurrentTime)
                                +import Data.Time.Clock.POSIX (utcTimeToPOSIXSeconds)
                                +import Lens.Family2 ((.~), (&))
                                +import Network.HostName (getHostName)
                                +import Proto.Tensorflow.Core.Framework.Summary (Summary)
                                +import Proto.Tensorflow.Core.Util.Event (Event, fileVersion, graphDef, step, summary, wallTime)
                                +import System.Directory (createDirectoryIfMissing)
                                +import System.FilePath ((</>))
                                +import TensorFlow.Build (MonadBuild, Build, asGraphDef)
                                +import TensorFlow.Ops (scalar)
                                +import TensorFlow.Records.Conduit (sinkTFRecords)
                                +import TensorFlow.Tensor (Tensor, render, SummaryTensor, addSummary, collectAllSummaries)
                                +import TensorFlow.Types (TensorType, type(/=))
                                +import Text.Printf (printf)
                                +import qualified Data.ByteString.Lazy as L
                                +import qualified Data.Conduit as Conduit
                                +import qualified Data.Conduit.List as Conduit
                                +import qualified Data.Text as T
                                +import qualified TensorFlow.GenOps.Core as CoreOps
                                +
                                +-- | Handle for logging TensorBoard events safely from multiple threads.
                                +data EventWriter = EventWriter (TBMQueue Event) (MVar ())
                                +
                                +-- | Writes Event protocol buffers to event files.
                                +withEventWriter ::
                                +    (MonadIO m, MonadMask m)
                                +    => FilePath
                                +    -- ^ logdir. Local filesystem directory where event file will be written.
                                +    -> (EventWriter -> m a)
                                +    -> m a
                                +withEventWriter logdir =
                                +    bracket (liftIO (newEventWriter logdir)) (liftIO . closeEventWriter)
                                +
                                +newEventWriter :: FilePath -> IO EventWriter
                                +newEventWriter logdir = do
                                +    createDirectoryIfMissing True logdir
                                +    t <- doubleWallTime
                                +    hostname <- getHostName
                                +    let filename = printf (logdir </> "events.out.tfevents.%010d.%s")
                                +                          (truncate t :: Integer) hostname
                                +    -- Asynchronously consume events from a queue.
                                +    -- We use a bounded queue to ensure the producer doesn't get too far ahead
                                +    -- of the consumer. The buffer size was picked arbitrarily.
                                +    q <- newTBMQueueIO 1024
                                +    -- Use an MVar to signal that the worker thread has completed.
                                +    done <- newEmptyMVar
                                +    let writer = EventWriter q done
                                +        consumeQueue = runResourceT $ Conduit.runConduit $
                                +            sourceTBMQueue q
                                +            =$= Conduit.map (L.fromStrict . encodeMessage)
                                +            =$= sinkTFRecords filename
                                +    _ <- forkFinally consumeQueue (\_ -> putMVar done ())
                                +    logEvent writer $ def & wallTime .~ t
                                +                          & fileVersion .~ T.pack "brain.Event:2"
                                +    return writer
                                +
                                +closeEventWriter :: EventWriter -> IO ()
                                +closeEventWriter (EventWriter q done) =
                                +    atomically (closeTBMQueue q) >> readMVar done
                                +
                                +-- | Logs the given Event protocol buffer.
                                +logEvent :: MonadIO m => EventWriter -> Event -> m ()
                                +logEvent (EventWriter q _) pb = liftIO (atomically (writeTBMQueue q pb))
                                +
                                +-- | Logs the graph for the given 'Build' action.
                                +logGraph :: MonadIO m => EventWriter -> Build a -> m ()
                                +logGraph writer build = do
                                +  let graph = asGraphDef build
                                +      graphBytes = encodeMessage graph
                                +      graphEvent = (def :: Event) & graphDef .~ graphBytes
                                +  logEvent writer graphEvent
                                +
                                +-- | Logs the given Summary event with an optional global step (use 0 if not
                                +-- applicable).
                                +logSummary :: MonadIO m => EventWriter -> Int64 -> Summary -> m ()
                                +logSummary writer step' summaryProto = do
                                +    t <- liftIO doubleWallTime
                                +    logEvent writer (def & wallTime .~ t
                                +                         & step .~ step'
                                +                         & summary .~ summaryProto
                                +                    )
                                +
                                +
                                +-- Number of seconds since epoch.
                                +doubleWallTime :: IO Double
                                +doubleWallTime = asDouble <$> getCurrentTime
                                +    where asDouble t = fromRational (toRational (utcTimeToPOSIXSeconds t))
                                +
                                +-- | Adds a 'CoreOps.histogramSummary' node. The tag argument is intentionally
                                +-- limited to a single value for simplicity.
                                +histogramSummary ::
                                +    (MonadBuild m, TensorType t, t /= ByteString, t /= Bool)
                                +     -- OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t)
                                +    => ByteString -> Tensor v t -> m ()
                                +histogramSummary tag = addSummary . CoreOps.histogramSummary (scalar tag)
                                +
                                +-- | Adds a 'CoreOps.scalarSummary' node.
                                +scalarSummary ::
                                +    (TensorType t, t /= ByteString, t /= Bool, MonadBuild m)
                                +    -- (TensorType t,
                                +    --  OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t)
                                +    => ByteString -> Tensor v t -> m ()
                                +scalarSummary tag = addSummary . CoreOps.scalarSummary (scalar tag)
                                +
                                +-- | Merge all summaries accumulated in the 'Build' into one summary.
                                +mergeAllSummaries :: MonadBuild m => m SummaryTensor
                                +mergeAllSummaries = collectAllSummaries >>= render . CoreOps.mergeSummary
                                +
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-logging-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/src/style.css b/docs/haddock/tensorflow-logging-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/tensorflow-logging.txt b/docs/haddock/tensorflow-logging-0.1.0.0/tensorflow-logging.txt deleted file mode 100644 index 2039cdb..0000000 --- 
a/docs/haddock/tensorflow-logging-0.1.0.0/tensorflow-logging.txt +++ /dev/null @@ -1,61 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | TensorBoard related functionality. --- --- Please see README.md -@package tensorflow-logging -@version 0.1.0.0 - - --- | TensorBoard Summary generation. Provides type safe wrappers around raw --- string emitting CoreOps. --- --- Example use: --- ---
                                ---   -- Call summary functions while constructing the graph.
                                ---   createModel = do
                                ---     loss <- -- ...
                                ---     TF.scalarSummary loss
                                ---   
                                ---   -- Write summaries to an EventWriter.
                                ---   train = TF.withEventWriter "/path/to/logs" $ \eventWriter -> do
                                ---       summaryTensor <- TF.build TF.allSummaries
                                ---       forM_ [1..] $ \step -> do
                                ---           if (step % 100 == 0)
                                ---               then do
                                ---                   ((), summaryBytes) <- TF.run (trainStep, summaryTensor)
                                ---                   let summary = decodeMessageOrDie (TF.unScalar summaryBytes)
                                ---                   TF.logSummary eventWriter step summary
                                ---               else TF.run_ trainStep
                                ---   
                                -module TensorFlow.Logging - --- | Handle for logging TensorBoard events safely from multiple threads. -data EventWriter - --- | Writes Event protocol buffers to event files. -withEventWriter :: (MonadIO m, MonadMask m) => FilePath -> (EventWriter -> m a) -> m a - --- | Logs the given Event protocol buffer. -logEvent :: MonadIO m => EventWriter -> Event -> m () - --- | Logs the given Summary event with an optional global step (use 0 if --- not applicable). -logSummary :: MonadIO m => EventWriter -> Int64 -> Summary -> m () - --- | Synonym for the tensors that return serialized Summary proto. -type SummaryTensor = Tensor Value ByteString - --- | Adds a histogramSummary node. The tag argument is intentionally --- limited to a single value for simplicity. -histogramSummary :: (MonadBuild m, TensorType t, t /= ByteString, t /= Bool) => ByteString -> Tensor v t -> m () - --- | Adds a scalarSummary node. -scalarSummary :: (TensorType t, t /= ByteString, t /= Bool, MonadBuild m) => ByteString -> Tensor v t -> m () - --- | Merge all summaries accumulated in the Build into one --- summary. -mergeAllSummaries :: MonadBuild m => m SummaryTensor diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/LICENSE b/docs/haddock/tensorflow-mnist-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html index 383104a..644767f 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html @@ -1,4 +1,4 @@ -TensorFlow.Examples.MNIST.Parse

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Examples.MNIST.Parse

                                Documentation

                                type MNIST = Vector Word8

                                Utilities specific to MNIST.

                                drawMNIST :: MNIST -> Text

                                Produces a unicode rendering of the MNIST digit sample.

                                checkEndian :: Get ()

                                Check's the file's endianess, throwing an error if it's not as expected.

                                readMNISTSamples :: FilePath -> IO [MNIST]

                                Reads an MNIST file and returns a list of samples.

                                readMNISTLabels :: FilePath -> IO [Word8]

                                Reads a list of MNIST labels from a file and returns them.

                                readMessageFromFileOrDie :: Message m => FilePath -> IO m

                                \ No newline at end of file +

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Examples.MNIST.Parse

                                Documentation

                                type MNIST = Vector Word8 Source #

                                Utilities specific to MNIST.

                                drawMNIST :: MNIST -> Text Source #

                                Produces a unicode rendering of the MNIST digit sample.

                                checkEndian :: Get () Source #

                                Check's the file's endianess, throwing an error if it's not as expected.

                                readMNISTSamples :: FilePath -> IO [MNIST] Source #

                                Reads an MNIST file and returns a list of samples.

                                readMNISTLabels :: FilePath -> IO [Word8] Source #

                                Reads a list of MNIST labels from a file and returns them.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html index 0079824..62ca462 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html @@ -1,4 +1,4 @@ -TensorFlow.Examples.MNIST.TrainedGraph

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                Safe HaskellSafe
                                LanguageHaskell2010

                                TensorFlow.Examples.MNIST.TrainedGraph

                                Description

                                Paths to test helper files.

                                Documentation

                                mnistPb :: IO FilePath

                                File containing a Tensorflow serialized proto of MNIST.

                                wtsCkpt :: IO ByteString

                                Files containing pre-trained weights for MNIST.

                                biasCkpt :: IO ByteString

                                Files containing pre-trained weights for MNIST.

                                \ No newline at end of file +

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                Safe HaskellSafe
                                LanguageHaskell2010

                                TensorFlow.Examples.MNIST.TrainedGraph

                                Description

                                Paths to test helper files.

                                Documentation

                                mnistPb :: IO FilePath Source #

                                File containing a Tensorflow serialized proto of MNIST.

                                wtsCkpt :: IO ByteString Source #

                                Files containing pre-trained weights for MNIST.

                                biasCkpt :: IO ByteString Source #

                                Files containing pre-trained weights for MNIST.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-mnist-0.1.0.0/doc-index.html index 5f705d0..e8aa687 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model. (Index)

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                \ No newline at end of file +

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/frames.html b/docs/haddock/tensorflow-mnist-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-mnist-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-mnist-0.1.0.0/index-frames.html deleted file mode 100644 index 3463791..0000000 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model. 
\ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/index.html b/docs/haddock/tensorflow-mnist-0.1.0.0/index.html index 3ccb460..f1086c5 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                Please see README.md

                                \ No newline at end of file +

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

                                Please see README.md

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-Parse.html b/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-Parse.html index 720a2bf..a09aaa0 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-Parse.html +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-Parse.html @@ -1,4 +1,4 @@ -TensorFlow.Examples.MNIST.Parse

                                TensorFlow.Examples.MNIST.Parse

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-TrainedGraph.html b/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-TrainedGraph.html index f4f119a..1d9fb3e 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-TrainedGraph.html +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-TrainedGraph.html @@ -1,4 +1,4 @@ -TensorFlow.Examples.MNIST.TrainedGraph

                                TensorFlow.Examples.MNIST.TrainedGraph

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/ocean.css b/docs/haddock/tensorflow-mnist-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ -424,30 
+435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html new file mode 100644 index 0000000..8d03739 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html @@ -0,0 +1,51 @@ +
                                {-# LANGUAGE CPP #-}
                                +{-# OPTIONS_GHC -fno-warn-missing-import-lists #-}
                                +{-# OPTIONS_GHC -fno-warn-implicit-prelude #-}
                                +module Paths_tensorflow_mnist (
                                +    version,
                                +    getBinDir, getLibDir, getDynLibDir, getDataDir, getLibexecDir,
                                +    getDataFileName, getSysconfDir
                                +  ) where
                                +
                                +import qualified Control.Exception as Exception
                                +import Data.Version (Version(..))
                                +import System.Environment (getEnv)
                                +import Prelude
                                +
                                +#if defined(VERSION_base)
                                +
                                +#if MIN_VERSION_base(4,0,0)
                                +catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
                                +#else
                                +catchIO :: IO a -> (Exception.Exception -> IO a) -> IO a
                                +#endif
                                +
                                +#else
                                +catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
                                +#endif
                                +catchIO = Exception.catch
                                +
                                +version :: Version
                                +version = Version [0,1,0,0] []
                                +bindir, libdir, dynlibdir, datadir, libexecdir, sysconfdir :: FilePath
                                +
                                +bindir     = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/bin"
                                +libdir     = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/lib/x86_64-linux-ghc-8.0.2/tensorflow-mnist-0.1.0.0-LVSlLKNCxlnazhUsgSZZs"
                                +dynlibdir  = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/lib/x86_64-linux-ghc-8.0.2"
                                +datadir    = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/share/x86_64-linux-ghc-8.0.2/tensorflow-mnist-0.1.0.0"
                                +libexecdir = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/libexec"
                                +sysconfdir = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/etc"
                                +
                                +getBinDir, getLibDir, getDynLibDir, getDataDir, getLibexecDir, getSysconfDir :: IO FilePath
                                +getBinDir = catchIO (getEnv "tensorflow_mnist_bindir") (\_ -> return bindir)
                                +getLibDir = catchIO (getEnv "tensorflow_mnist_libdir") (\_ -> return libdir)
                                +getDynLibDir = catchIO (getEnv "tensorflow_mnist_dynlibdir") (\_ -> return dynlibdir)
                                +getDataDir = catchIO (getEnv "tensorflow_mnist_datadir") (\_ -> return datadir)
                                +getLibexecDir = catchIO (getEnv "tensorflow_mnist_libexecdir") (\_ -> return libexecdir)
                                +getSysconfDir = catchIO (getEnv "tensorflow_mnist_sysconfdir") (\_ -> return sysconfdir)
                                +
                                +getDataFileName :: FilePath -> IO FilePath
                                +getDataFileName name = do
                                +  dir <- getDataDir
                                +  return (dir ++ "/" ++ name)
                                +
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow.Examples.MNIST.Parse.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow.Examples.MNIST.Parse.html new file mode 100644 index 0000000..694c96b --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow.Examples.MNIST.Parse.html @@ -0,0 +1,97 @@ +
                                -- Copyright 2016 TensorFlow authors.
                                +--
                                +-- Licensed under the Apache License, Version 2.0 (the "License");
                                +-- you may not use this file except in compliance with the License.
                                +-- You may obtain a copy of the License at
                                +--
                                +--     http://www.apache.org/licenses/LICENSE-2.0
                                +--
                                +-- Unless required by applicable law or agreed to in writing, software
                                +-- distributed under the License is distributed on an "AS IS" BASIS,
                                +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                                +-- See the License for the specific language governing permissions and
                                +-- limitations under the License.
                                +
                                +{-# LANGUAGE OverloadedStrings #-}
                                +{-# LANGUAGE OverloadedLists #-}
                                +{-# LANGUAGE TypeSynonymInstances #-}
                                +{-# LANGUAGE FlexibleInstances #-}
                                +{-# LANGUAGE ViewPatterns #-}
                                +
                                +module TensorFlow.Examples.MNIST.Parse where
                                +
                                +import Control.Monad (when, liftM)
                                +import Data.Binary.Get (Get, runGet, getWord32be, getLazyByteString)
                                +import Data.ByteString.Lazy (toStrict, readFile)
                                +import Data.List.Split (chunksOf)
                                +import Data.Monoid ((<>))
                                +import Data.ProtoLens (Message, decodeMessageOrDie)
                                +import Data.Text (Text)
                                +import Data.Word (Word8, Word32)
                                +import Prelude hiding (readFile)
                                +import qualified Codec.Compression.GZip as GZip
                                +import qualified Data.ByteString.Lazy as L
                                +import qualified Data.Text as Text
                                +import qualified Data.Vector as V
                                +
                                +-- | Utilities specific to MNIST.
                                +type MNIST = V.Vector Word8
                                +
                                +-- | Produces a unicode rendering of the MNIST digit sample.
                                +drawMNIST :: MNIST -> Text
                                +drawMNIST = chunk . block
                                +  where
                                +    block :: V.Vector Word8 -> Text
                                +    block (V.splitAt 1 -> ([0], xs)) = " " <> block xs
                                +    block (V.splitAt 1 -> ([n], xs)) = c `Text.cons` block xs
                                +      where c = "\9617\9618\9619\9608" !! fromIntegral (n `div` 64)
                                +    block (V.splitAt 1 -> _)   = ""
                                +    chunk :: Text -> Text
                                +    chunk "" = "\n"
                                +    chunk xs = Text.take 28 xs <> "\n" <> chunk (Text.drop 28 xs)
                                +
                                +-- | Check's the file's endianess, throwing an error if it's not as expected.
                                +checkEndian :: Get ()
                                +checkEndian = do
                                +    magic <- getWord32be
                                +    when (magic `notElem` ([2049, 2051] :: [Word32])) $
                                +        fail "Expected big endian, but image file is little endian."
                                +
                                +-- | Reads an MNIST file and returns a list of samples.
                                +readMNISTSamples :: FilePath -> IO [MNIST]
                                +readMNISTSamples path = do
                                +    raw <- GZip.decompress <$> readFile path
                                +    return $ runGet getMNIST raw
                                +  where
                                +    getMNIST :: Get [MNIST]
                                +    getMNIST = do
                                +        checkEndian
                                +        -- Parse header data.
                                +        cnt  <- liftM fromIntegral getWord32be
                                +        rows <- liftM fromIntegral getWord32be
                                +        cols <- liftM fromIntegral getWord32be
                                +        -- Read all of the data, then split into samples.
                                +        pixels <- getLazyByteString $ fromIntegral $ cnt * rows * cols
                                +        return $ V.fromList <$> chunksOf (rows * cols) (L.unpack pixels)
                                +
                                +-- | Reads a list of MNIST labels from a file and returns them.
                                +readMNISTLabels :: FilePath -> IO [Word8]
                                +readMNISTLabels path = do
                                +    raw <- GZip.decompress <$> readFile path
                                +    return $ runGet getLabels raw
                                +  where getLabels :: Get [Word8]
                                +        getLabels = do
                                +            checkEndian
                                +            -- Parse header data.
                                +            cnt <- liftM fromIntegral getWord32be
                                +            -- Read all of the labels.
                                +            L.unpack <$> getLazyByteString cnt
                                +
                                +readMessageFromFileOrDie :: Message m => FilePath -> IO m
                                +readMessageFromFileOrDie path = do
                                +    pb <- readFile path
                                +    return $ decodeMessageOrDie $ toStrict pb
                                +
                                +-- TODO: Write a writeMessageFromFileOrDie and read/write non-lethal
                                +--             versions.
                                +
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow.Examples.MNIST.TrainedGraph.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow.Examples.MNIST.TrainedGraph.html new file mode 100644 index 0000000..1088595 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow.Examples.MNIST.TrainedGraph.html @@ -0,0 +1,31 @@ +
                                -- Copyright 2016 TensorFlow authors.
                                +--
                                +-- Licensed under the Apache License, Version 2.0 (the "License");
                                +-- you may not use this file except in compliance with the License.
                                +-- You may obtain a copy of the License at
                                +--
                                +--     http://www.apache.org/licenses/LICENSE-2.0
                                +--
                                +-- Unless required by applicable law or agreed to in writing, software
                                +-- distributed under the License is distributed on an "AS IS" BASIS,
                                +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                                +-- See the License for the specific language governing permissions and
                                +-- limitations under the License.
                                +
                                +{-# LANGUAGE OverloadedStrings #-}
                                +-- | Paths to test helper files.
                                +module TensorFlow.Examples.MNIST.TrainedGraph where
                                +
                                +import Paths_tensorflow_mnist (getDataFileName)
                                +import Data.ByteString (ByteString)
                                +import Data.ByteString.Char8 (pack)
                                +
                                +-- | File containing a Tensorflow serialized proto of MNIST.
                                +mnistPb :: IO FilePath
                                +mnistPb = getDataFileName "data/MNIST.pb"
                                +
                                +-- | Files containing pre-trained weights for MNIST.
                                +wtsCkpt, biasCkpt :: IO ByteString
                                +wtsCkpt = pack <$> getDataFileName "data/MNISTWts.ckpt"
                                +biasCkpt = pack <$> getDataFileName "data/MNISTBias.ckpt"
                                +
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-mnist-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/style.css b/docs/haddock/tensorflow-mnist-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/tensorflow-mnist.txt b/docs/haddock/tensorflow-mnist-0.1.0.0/tensorflow-mnist.txt deleted file mode 100644 index 5248c19..0000000 --- 
a/docs/haddock/tensorflow-mnist-0.1.0.0/tensorflow-mnist.txt +++ /dev/null @@ -1,41 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | TensorFlow demo application for learning MNIST model. --- --- Please see README.md -@package tensorflow-mnist -@version 0.1.0.0 - - --- | Paths to test helper files. -module TensorFlow.Examples.MNIST.TrainedGraph - --- | File containing a Tensorflow serialized proto of MNIST. -mnistPb :: IO FilePath - --- | Files containing pre-trained weights for MNIST. -wtsCkpt :: IO ByteString - --- | Files containing pre-trained weights for MNIST. -biasCkpt :: IO ByteString - -module TensorFlow.Examples.MNIST.Parse - --- | Utilities specific to MNIST. -type MNIST = Vector Word8 - --- | Produces a unicode rendering of the MNIST digit sample. -drawMNIST :: MNIST -> Text - --- | Check's the file's endianess, throwing an error if it's not as --- expected. -checkEndian :: Get () - --- | Reads an MNIST file and returns a list of samples. -readMNISTSamples :: FilePath -> IO [MNIST] - --- | Reads a list of MNIST labels from a file and returns them. -readMNISTLabels :: FilePath -> IO [Word8] -readMessageFromFileOrDie :: Message m => FilePath -> IO m diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/LICENSE b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html index b9a21bf..a78fd29 100644 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html @@ -1,4 +1,4 @@ -TensorFlow.Examples.MNIST.InputData

                                tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                Safe HaskellSafe
                                LanguageHaskell2010

                                TensorFlow.Examples.MNIST.InputData

                                Documentation

                                trainingImageData :: IO FilePath

                                Download the files containing the canonical MNIST samples and labels.

                                trainingLabelData :: IO FilePath

                                Download the files containing the canonical MNIST samples and labels.

                                \ No newline at end of file +

                                tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                Safe HaskellSafe
                                LanguageHaskell2010

                                TensorFlow.Examples.MNIST.InputData

                                Documentation

                                trainingImageData :: IO FilePath Source #

                                Download the files containing the canonical MNIST samples and labels.

                                trainingLabelData :: IO FilePath Source #

                                Download the files containing the canonical MNIST samples and labels.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/doc-index.html index 0fe16fa..b24a6b4 100644 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST. (Index)

                                tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                \ No newline at end of file +

                                tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/frames.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index-frames.html deleted file mode 100644 index 7542511..0000000 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST. 
\ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index.html index c041e18..8e30ab1 100644 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                Please see README.md

                                Modules

                                \ No newline at end of file +

                                tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

                                Please see README.md

                                Modules

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/mini_TensorFlow-Examples-MNIST-InputData.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/mini_TensorFlow-Examples-MNIST-InputData.html index d719ceb..d850cb0 100644 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/mini_TensorFlow-Examples-MNIST-InputData.html +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/mini_TensorFlow-Examples-MNIST-InputData.html @@ -1,4 +1,4 @@ -TensorFlow.Examples.MNIST.InputData

                                TensorFlow.Examples.MNIST.InputData

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/ocean.css b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 
0.2em; margin: 0 -0.5em 0 0.5em; } @@ -424,30 +435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html new file mode 100644 index 0000000..cec2721 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html @@ -0,0 +1,51 @@ +
                                {-# LANGUAGE CPP #-}
                                +{-# OPTIONS_GHC -fno-warn-missing-import-lists #-}
                                +{-# OPTIONS_GHC -fno-warn-implicit-prelude #-}
                                +module Paths_tensorflow_mnist_input_data (
                                +    version,
                                +    getBinDir, getLibDir, getDynLibDir, getDataDir, getLibexecDir,
                                +    getDataFileName, getSysconfDir
                                +  ) where
                                +
                                +import qualified Control.Exception as Exception
                                +import Data.Version (Version(..))
                                +import System.Environment (getEnv)
                                +import Prelude
                                +
                                +#if defined(VERSION_base)
                                +
                                +#if MIN_VERSION_base(4,0,0)
                                +catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
                                +#else
                                +catchIO :: IO a -> (Exception.Exception -> IO a) -> IO a
                                +#endif
                                +
                                +#else
                                +catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
                                +#endif
                                +catchIO = Exception.catch
                                +
                                +version :: Version
                                +version = Version [0,1,0,0] []
                                +bindir, libdir, dynlibdir, datadir, libexecdir, sysconfdir :: FilePath
                                +
                                +bindir     = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/bin"
                                +libdir     = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/lib/x86_64-linux-ghc-8.0.2/tensorflow-mnist-input-data-0.1.0.0-IhOe5EsZKPy2oP1GEheqzD"
                                +dynlibdir  = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/lib/x86_64-linux-ghc-8.0.2"
                                +datadir    = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/share/x86_64-linux-ghc-8.0.2/tensorflow-mnist-input-data-0.1.0.0"
                                +libexecdir = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/libexec"
                                +sysconfdir = "/usr/local/google/home/fmayle/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-8.13/8.0.2/etc"
                                +
                                +getBinDir, getLibDir, getDynLibDir, getDataDir, getLibexecDir, getSysconfDir :: IO FilePath
                                +getBinDir = catchIO (getEnv "tensorflow_mnist_input_data_bindir") (\_ -> return bindir)
                                +getLibDir = catchIO (getEnv "tensorflow_mnist_input_data_libdir") (\_ -> return libdir)
                                +getDynLibDir = catchIO (getEnv "tensorflow_mnist_input_data_dynlibdir") (\_ -> return dynlibdir)
                                +getDataDir = catchIO (getEnv "tensorflow_mnist_input_data_datadir") (\_ -> return datadir)
                                +getLibexecDir = catchIO (getEnv "tensorflow_mnist_input_data_libexecdir") (\_ -> return libexecdir)
                                +getSysconfDir = catchIO (getEnv "tensorflow_mnist_input_data_sysconfdir") (\_ -> return sysconfdir)
                                +
                                +getDataFileName :: FilePath -> IO FilePath
                                +getDataFileName name = do
                                +  dir <- getDataDir
                                +  return (dir ++ "/" ++ name)
                                +
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow.Examples.MNIST.InputData.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow.Examples.MNIST.InputData.html new file mode 100644 index 0000000..e64aabf --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow.Examples.MNIST.InputData.html @@ -0,0 +1,32 @@ +
                                -- Copyright 2016 TensorFlow authors.
                                +--
                                +-- Licensed under the Apache License, Version 2.0 (the "License");
                                +-- you may not use this file except in compliance with the License.
                                +-- You may obtain a copy of the License at
                                +--
                                +--     http://www.apache.org/licenses/LICENSE-2.0
                                +--
                                +-- Unless required by applicable law or agreed to in writing, software
                                +-- distributed under the License is distributed on an "AS IS" BASIS,
                                +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                                +-- See the License for the specific language governing permissions and
                                +-- limitations under the License.
                                +
                                +module TensorFlow.Examples.MNIST.InputData
                                +  ( trainingImageData
                                +  , trainingLabelData
                                +  , testImageData
                                +  , testLabelData
                                +  ) where
                                +
                                +import Paths_tensorflow_mnist_input_data (getDataFileName)
                                +
                                +-- | Download the files containing the canonical MNIST samples and labels.
                                +trainingImageData, trainingLabelData :: IO FilePath
                                +trainingImageData = getDataFileName "train-images-idx3-ubyte.gz"
                                +trainingLabelData = getDataFileName "train-labels-idx1-ubyte.gz"
                                +
                                +testImageData, testLabelData :: IO FilePath
                                +testImageData = getDataFileName "t10k-images-idx3-ubyte.gz"
                                +testLabelData = getDataFileName "t10k-labels-idx1-ubyte.gz"
                                +
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/style.css b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/tensorflow-mnist-input-data.txt 
b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/tensorflow-mnist-input-data.txt deleted file mode 100644 index afb87ad..0000000 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/tensorflow-mnist-input-data.txt +++ /dev/null @@ -1,19 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | Downloader of input data for training MNIST. --- --- Please see README.md -@package tensorflow-mnist-input-data -@version 0.1.0.0 - -module TensorFlow.Examples.MNIST.InputData - --- | Download the files containing the canonical MNIST samples and labels. -trainingImageData :: IO FilePath - --- | Download the files containing the canonical MNIST samples and labels. -trainingLabelData :: IO FilePath -testImageData :: IO FilePath -testLabelData :: IO FilePath diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/TensorFlow-NN.html b/docs/haddock/tensorflow-nn-0.1.0.0/TensorFlow-NN.html deleted file mode 100644 index 0004a3a..0000000 --- a/docs/haddock/tensorflow-nn-0.1.0.0/TensorFlow-NN.html +++ /dev/null @@ -1,15 +0,0 @@ -TensorFlow.NN

                                tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.NN

                                Documentation

                                sigmoidCrossEntropyWithLogits

                                Arguments

                                :: (MonadBuild m, OneOf `[Float, Double]` a, TensorType a, Num a) 
                                => Tensor Value a

                                logits

                                -> Tensor Value a

                                targets

                                -> m (Tensor Value a) 

                                Computes sigmoid cross entropy given logits.

                                Measures the probability error in discrete classification tasks in which each - class is independent and not mutually exclusive. For instance, one could - perform multilabel classification where a picture can contain both an elephant - and a dog at the same time.

                                For brevity, let `x = logits`, `z = targets`. The logistic loss is

                                z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) - = z * -log(1 (1 + exp(-x))) + (1 - z) * -log(exp(-x) (1 + exp(-x))) - = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) - = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) - = (1 - z) * x + log(1 + exp(-x)) - = x - x * z + log(1 + exp(-x))

                                For x < 0, to avoid overflow in exp(-x), we reformulate the above

                                x - x * z + log(1 + exp(-x)) - = log(exp(x)) - x * z + log(1 + exp(-x)) - = - x * z + log(1 + exp(x))

                                Hence, to ensure stability and avoid overflow, the implementation uses this - equivalent formulation

                                max(x, 0) - x * z + log(1 + exp(-abs(x)))

                                logits and targets must have the same type and shape.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-nn-0.1.0.0/doc-index.html deleted file mode 100644 index f38f4a6..0000000 --- a/docs/haddock/tensorflow-nn-0.1.0.0/doc-index.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings. (Index)

                                tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Index

                                sigmoidCrossEntropyWithLogitsTensorFlow.NN
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/frames.html b/docs/haddock/tensorflow-nn-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-nn-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-nn-0.1.0.0/haddock-util.js deleted file mode 100644 index 9a6fccf..0000000 --- a/docs/haddock/tensorflow-nn-0.1.0.0/haddock-util.js +++ /dev/null @@ -1,344 +0,0 @@ -// Haddock JavaScript utilities - -var rspace = /\s\s+/g, - rtrim = /^\s+|\s+$/g; - -function spaced(s) { return (" " + s + " ").replace(rspace, " "); } -function trim(s) { return s.replace(rtrim, ""); } - -function hasClass(elem, value) { - var className = spaced(elem.className || ""); - return className.indexOf( " " + value + " " ) >= 0; -} - -function addClass(elem, value) { - var className = spaced(elem.className || ""); - if ( className.indexOf( " " + value + " " ) < 0 ) { - elem.className = trim(className + " " + value); - } -} - -function removeClass(elem, value) { - var className = spaced(elem.className || ""); - className = className.replace(" " + value + " ", " "); - elem.className = trim(className); -} - -function toggleClass(elem, valueOn, valueOff, bool) { - if (bool == null) { bool = ! hasClass(elem, valueOn); } - if (bool) { - removeClass(elem, valueOff); - addClass(elem, valueOn); - } - else { - removeClass(elem, valueOn); - addClass(elem, valueOff); - } - return bool; -} - - -function makeClassToggle(valueOn, valueOff) -{ - return function(elem, bool) { - return toggleClass(elem, valueOn, valueOff, bool); - } -} - -toggleShow = makeClassToggle("show", "hide"); -toggleCollapser = makeClassToggle("collapser", "expander"); - -function toggleSection(id) -{ - var b = toggleShow(document.getElementById("section." + id)); - toggleCollapser(document.getElementById("control." 
+ id), b); - rememberCollapsed(id, b); - return b; -} - -var collapsed = {}; -function rememberCollapsed(id, b) -{ - if(b) - delete collapsed[id] - else - collapsed[id] = null; - - var sections = []; - for(var i in collapsed) - { - if(collapsed.hasOwnProperty(i)) - sections.push(i); - } - // cookie specific to this page; don't use setCookie which sets path=/ - document.cookie = "collapsed=" + escape(sections.join('+')); -} - -function restoreCollapsed() -{ - var cookie = getCookie("collapsed"); - if(!cookie) - return; - - var ids = cookie.split('+'); - for(var i in ids) - { - if(document.getElementById("section." + ids[i])) - toggleSection(ids[i]); - } -} - -function setCookie(name, value) { - document.cookie = name + "=" + escape(value) + ";path=/;"; -} - -function clearCookie(name) { - document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; -} - -function getCookie(name) { - var nameEQ = name + "="; - var ca = document.cookie.split(';'); - for(var i=0;i < ca.length;i++) { - var c = ca[i]; - while (c.charAt(0)==' ') c = c.substring(1,c.length); - if (c.indexOf(nameEQ) == 0) { - return unescape(c.substring(nameEQ.length,c.length)); - } - } - return null; -} - - - -var max_results = 75; // 50 is not enough to search for map in the base libraries -var shown_range = null; -var last_search = null; - -function quick_search() -{ - perform_search(false); -} - -function full_search() -{ - perform_search(true); -} - - -function perform_search(full) -{ - var text = document.getElementById("searchbox").value.toLowerCase(); - if (text == last_search && !full) return; - last_search = text; - - var table = document.getElementById("indexlist"); - var status = document.getElementById("searchmsg"); - var children = table.firstChild.childNodes; - - // first figure out the first node with the prefix - var first = bisect(-1); - var last = (first == -1 ? 
-1 : bisect(1)); - - if (first == -1) - { - table.className = ""; - status.innerHTML = "No results found, displaying all"; - } - else if (first == 0 && last == children.length - 1) - { - table.className = ""; - status.innerHTML = ""; - } - else if (last - first >= max_results && !full) - { - table.className = ""; - status.innerHTML = "More than " + max_results + ", press Search to display"; - } - else - { - // decide what you need to clear/show - if (shown_range) - setclass(shown_range[0], shown_range[1], "indexrow"); - setclass(first, last, "indexshow"); - shown_range = [first, last]; - table.className = "indexsearch"; - status.innerHTML = ""; - } - - - function setclass(first, last, status) - { - for (var i = first; i <= last; i++) - { - children[i].className = status; - } - } - - - // do a binary search, treating 0 as ... - // return either -1 (no 0's found) or location of most far match - function bisect(dir) - { - var first = 0, finish = children.length - 1; - var mid, success = false; - - while (finish - first > 3) - { - mid = Math.floor((finish + first) / 2); - - var i = checkitem(mid); - if (i == 0) i = dir; - if (i == -1) - finish = mid; - else - first = mid; - } - var a = (dir == 1 ? first : finish); - var b = (dir == 1 ? finish : first); - for (var i = b; i != a - dir; i -= dir) - { - if (checkitem(i) == 0) return i; - } - return -1; - } - - - // from an index, decide what the result is - // 0 = match, -1 is lower, 1 is higher - function checkitem(i) - { - var s = getitem(i).toLowerCase().substr(0, text.length); - if (s == text) return 0; - else return (s > text ? 
-1 : 1); - } - - - // from an index, get its string - // this abstracts over alternates - function getitem(i) - { - for ( ; i >= 0; i--) - { - var s = children[i].firstChild.firstChild.data; - if (s.indexOf(' ') == -1) - return s; - } - return ""; // should never be reached - } -} - -function setSynopsis(filename) { - if (parent.window.synopsis) { - if (parent.window.synopsis.location.replace) { - // In Firefox this avoids adding the change to the history. - parent.window.synopsis.location.replace(filename); - } else { - parent.window.synopsis.location = filename; - } - } -} - -function addMenuItem(html) { - var menu = document.getElementById("page-menu"); - if (menu) { - var btn = menu.firstChild.cloneNode(false); - btn.innerHTML = html; - menu.appendChild(btn); - } -} - -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - -function styles() { - var i, a, es = document.getElementsByTagName("link"), rs = []; - for (i = 0; a = es[i]; i++) { - if(a.rel.indexOf("style") != -1 && a.title) { - rs.push(a); - } - } - return rs; -} - -function addStyleMenu() { - var as = styles(); - var i, a, btns = ""; - for(i=0; a = as[i]; i++) { - btns += "
                              • " - + a.title + "
                              • " - } - if (as.length > 1) { - var h = "
                                " - + "Style ▾" - + "
                                  " + btns + "
                                " - + "
                                "; - addMenuItem(h); - } -} - -function setActiveStyleSheet(title) { - var as = styles(); - var i, a, found; - for(i=0; a = as[i]; i++) { - a.disabled = true; - // need to do this always, some browsers are edge triggered - if(a.title == title) { - found = a; - } - } - if (found) { - found.disabled = false; - setCookie("haddock-style", title); - } - else { - as[0].disabled = false; - clearCookie("haddock-style"); - } - styleMenu(false); -} - -function resetStyle() { - var s = getCookie("haddock-style"); - if (s) setActiveStyleSheet(s); -} - - -function styleMenu(show) { - var m = document.getElementById('style-menu'); - if (m) toggleShow(m, show); -} - - -function pageLoad() { - addStyleMenu(); - adjustForFrames(); - resetStyle(); - restoreCollapsed(); -} - diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-nn-0.1.0.0/hslogo-16.png deleted file mode 100644 index 0ff8579fbd897417b0d6dad6e920f8882138a7c0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1684 zcmV;F25b3=P)4Tx0C)j~RL^S@K@|QrZmG~B2wH0nvUrdpNm;9CMbtL^5n^i$+aIn^?(HA4aZWV5ov6ELTdbo0FI&wK{O>*+w4vx20?>!`FrQsdJlnHR>OPy zcd~b_n$otK2Za4V;76L-DzNVtaSB-y0*E}{p()372;bw_^6ZZ}PI-92wGS&j#91PI zKs7DSe@(bk%_Y-7gGe}(^>I=@oY#w#*Bu9GZf3^F5WP>3rn}7Ut74&?PWBFvy`A)a zPP5)V!Xd&78LdA?xQ(9mjMYElVd13a#D+Z_7&Y|xU=_C-srWU*6kiZcC!$nw*)9$7 zn6CX+@=AhmkT}X@VSsa5NKe;HZuq)~1$`#h6R+ZTR#D-3j}vF!)ZOnz+5)dI4jl{{ z44Mr{P!L4~VVJN`K!!XTF*LGrKO?IK8z<8w`3e3jI8lUGNUta*C8 zn(P`s>{pjD=7Kek#B;Fw@hxAK%$F&Q6vg9J^Xf~4by_hu-=A!MJ3Znq&n~srbFGPs zH&&aMXZ>nO`|hf|ljc?VPhR!${AbO?W8x_>CU%PFA&Hm8F7cAsOREdwU~R_;ot1_u z(ruCYB-LPGn!NQdT|ZlRy+(fw^-+`=%+gee_kY4FWHg<*4sZI8+sFJD270UUORdLHO0nA4V) z%{fwsET5CQ>B?eK%uw4yQc~9?*JVo2}ze(;aRcp*ceL#HUJSllrgm5wQKR zQu+C;QrUh^8rFfA`ftFz{YAidi-`aL010qNS#tmY4c7nw4c7reD4Tcy00T@(L_t(I z5sj2vNEA^R$7gqDc6T=2^@fUA2(c`MltuL5<|KW>RWz$&YbU@|M|{$E*8Tu-Ux!w z1Y*Dr&Ubfr&v-nZaaB{3ilRumrjPmk{sZvQEWlW+{o~IH|8)=s6c#X9S5s5d%J 
z4@)&QH5|xQY-)^L1n0pTRu0Lx9`08YTjTwn^6 z0;b1+aQ@)n;Em$q;=7BBi)v0zj&o^g>0Whp^_^5IbxIUP8C@y9;R?*Ouu}rmfxbU= zwtWVNke-m!=`7bYEhWpcI5#)9qp`8E0lr6IQ)ARL3Ui}Af@grj8aN1=r>Cb+prlzO zNfJs*N_tUm2ZL%5* zPmL2??da$TR904gL(VDAQ-Fv_Dk}Pdw*4T(%*f4MKLRg=4ekMjhe2mW zMFsBwg%ftWT}0kxRaIk1k7qJ8*#cKB;Ft{i`zVIs-Nqge;!!Ld7#O&Qqu7e0sJmP) z$MW*>L$vSB&dxp@iA3U9fo)-7!Czlr{|o7Hv{1oyg3xsu%gn@(b1>$;SM-ZaQ`HV=V0s;lr%d8bd;xY zGwNvm3=Iu=tyXIgtJnf@A(2S@M140N ew{UA~tMxaJq;$xaSSi*30000tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings. \ No newline at end of file diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/index.html b/docs/haddock/tensorflow-nn-0.1.0.0/index.html deleted file mode 100644 index c21a82e..0000000 --- a/docs/haddock/tensorflow-nn-0.1.0.0/index.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings.

                                tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings.

                                tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Please see README.md

                                Modules

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/minus.gif b/docs/haddock/tensorflow-nn-0.1.0.0/minus.gif deleted file mode 100644 index 1deac2fe1a42e35b994f1b855488f392c50f6a89..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 56 zcmZ?wbhEHb * { - font-size: 93%; /* 12pt */ -} - -#mini #module-list .caption, -#mini #module-header .caption { - font-size: 125%; /* 15pt */ -} - -#mini #interface h1, -#mini #interface h2, -#mini #interface h3, -#mini #interface h4 { - font-size: 109%; /* 13pt */ - margin: 1em 0 0; -} - -#mini #interface .top, -#mini #interface .src { - margin: 0; -} - -#mini #module-list ul { - list-style: none; - margin: 0; -} - -#alphabet ul { - list-style: none; - padding: 0; - margin: 0.5em 0 0; - text-align: center; -} - -#alphabet li { - display: inline; - margin: 0 0.25em; -} - -#alphabet a { - font-weight: bold; -} - -#index .caption, -#module-list .caption { font-size: 131%; /* 17pt */ } - -#index table { - margin-left: 2em; -} - -#index .src { - font-weight: bold; -} -#index .alt { - font-size: 77%; /* 10pt */ - font-style: italic; - padding-left: 2em; -} - -#index td + td { - padding-left: 1em; -} - -#module-list ul { - list-style: none; - margin: 0 0 0 2em; -} - -#module-list li { - clear: right; -} - -#module-list span.collapser, -#module-list span.expander { - background-position: 0 0.3em; -} - -#module-list .package { - float: right; -} - -/* @end */ diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/plus.gif b/docs/haddock/tensorflow-nn-0.1.0.0/plus.gif deleted file mode 100644 index 2d15c14173d23f664b955cd24f51c82f5f09d91d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 59 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-nn-0.1.0.0/synopsis.png deleted file mode 100644 index 
85fb86ec84907bcc86531dc82871948ff4d471fa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11327 zcmV-FEWp!=P)4Tx0C)k_S!GyNTeqHT_l8Y(cXyX`gGi?cY`Qxn1VID|MJXwjPC)?)F$h6K zMMOd+6hs7sqbPzXbr*U(-*=zy-hcPcUC*=TdiNM(jyd-lv&OpsU|J&v2m2!^0SE{T z54F(O;E2!K(!rTCW z%wV;vdzf1QjBf#e&~gh74F>?Z4a=WLg$KhJ^$5nap>PLbJadS>e&h8+?D`9%QNL`g zEVKbYGXj7k5Q(8)0Fd#*a?VIMFW3*64geVHKzE-&0BG!BtmfuTbO(T`0Jaeg2nagF z{V*1E{Wm{e|AvV~*MEExiC+KU-~R=!2{)|c6Bg`GjQ;iG|FQ`1kAUCTuZtQk34#8{ z4r4(3g7#|{=Z@d+d#}7f!3C=>=26vx*jwA8>@MS>RG@Tt_zt3hie^T z_?0%9VUd=)Fos7I z^ghPh%Jy%YZ|)vCf6EaFPai$Q-!=$ppK!y&wrJs)bNdAuANB!m3n34Tfj{s75g-&U z1A!Pg3bcXF-=!Gv1VmU93G2duANT;{0JugFTqg*|oPXPC|A$2HS3NJd-hcPV3EW`Y zh=1Dr-5Mv{<{zIvz#Ybay&^Vcn^E_`qRfl{{bzYkp)4~$~NAx_VB;E z{?P)PU)DbV{Qi#~0H0@T9czDj06@6MNq8OrpdAz(9qQxd9nPr<&s+~tPQySqaZyfb zNh!%g_5YjeaLxMN*$sv_p;d%b#U$Wpz0Geb0U>E+EOsEQ;I!&= zNC6q(BFFWohy&t- zL?CHM5mJM6p`(xmWDmJOUQi$u0mVUQpbRJ*DuT+OI;a`C4fR4p&?xj8nuk`Puh35f z55*JWF{C0=8)=GkKzbrWk@3iMWInPS*@Wyu4kE{pbI3L14-^JPgW^Pq!Q<2bWsPz} zg`nb5nW!REEvg;Wj~YYGqt;RTXfiY_S_G|(HbmQ@z0gtU6m&ki8r_B-Ku@3-(OVb{ zh8`n;QNS2r>@mKWSWG773g!l;2Q!LUz-(f%SSG9pRuyZCC1S&|DcC~nb!<2G1$Gg; zjU&Zz;G}VSI0sxHE(w>9tH<5Py}&KucJP#VKD;vC6z`6Y#%JLx@m=^4{33pbgo;Ff zM3uyf#Fr$Iq=2M}WPoIbWP_BHl$%tE)ST3Z^fYM!=}po{r1PXd2-E~&f;PdC5J9*= zs3G(aUK2LR$jJD~G{_vt!pSa>)sa0QdqcKOPD3tEZbLrbsZB|wjHfK7yiNI%a+8XNN{Y&qDu61Js-9|yYMB~K%}=dM z?M|IcT|xbTdVvN>!$YG@<3@9arjllWW|0;{D?n>V>r0zK+erJ2cAbuzPL|Gw?j&6? 
z-95TFdL%tRy&=6neHMKS{UrTQ1~vvw1`mcbh9-s=4Br`97&RC@7}FVVFitT3Wa4Df zW%6UX#MHqw%Zy?cW;SPzV!p~ez`Vvn%c8>K#*)s`!ZO8*U=?PyV2x$1V13HE$;Qs6 z&lb#9$o7D3jh&udgWZ=sm;FBb3I`2`8ix-@E=M=VM@~9UO-_H#0?vNUbuLye1Fi_J zGOlM_JKO@?*4#+T3Fgmx>$N#hD=6JCPAiC=8LR|tcUDX*;jHjawc-Aa(!}p@(S{y z@=fw93cLy~3MC3J6=@aC6f+ecDWR3LloFKgD*aHFR}NQhQU0tVrsAhkud;kZ;E2bO z$|DP^+^R&?GSxXXPBj;`QnfjCE_I@Mx%xW|9u0SmYKzbdmB(*}d+O)oF zD{G(9?$JT&=D|u+DJZ zNWtioQNJ<4*wVPj_}x+AqoGH;Ob{kUCOIZE$M}u~9_ug#riP|Drn6=OW+7&G%rWL> z=Ede8ETk;rECwxUES)XuEw`++tg@`8tp%+ktov*zY#eRsY`)v-*k;?#*-6-)vU_6B zZ0}>=>40^xaj16KJg$2@@A#sloMVdPRon; zro?jMrmLZAiR-$Xw%cX5Rd)^dT=x|ZRgY|sB~Mk)Y|mvcRj(Yc6>oL#eD5_MZJ#2a zFTMu8*L=VGnflfE9r)Y&-w413xCGn|qz?28>kOxb4~I`91S8Hy%txw47DsMJ*+jLTq&gXR@@ceibXxRMj9yGtEGpJ5wl9t= zE-`NYl;)|jcqraAzAu3%Avt03wEpSZM3O|m#Ni~#r0k?`XKc@OC9@@;PF^^xf3_io zJS8;cWvWW*wR5O*KIfjL$)pvg?Wen^KhBWM$j{i#bjy5vUg~_o`GX6d7oKIwXI;IB zxfpnH@{;j<`HmaI~Pakhkz+;ck(4 z(L}LU@r@GJlC+ZVSKP0>xT6f*a^OxsWU@9UjK2+LN4pu2v z)m1ZBXH@Ui1lG*eTGaN}Db&@~v({%dAQ~bXR<1ijt)TYR@l+GyI++oAU8_Vo_$j=4_z&e7XOxBI$Oy4voD->JFFb+`B) z-My^)B=?i=A9TlbZ}tTDto3^JF7!F~O+T=EFy3$8|7^f`;L$_9hYtod2fH7sKDs-k zJaqf9;^U4d@=w~I$~|oxmK$z+CjYE`L}8@!xzh8l(IcbxU#P$69n%?mIBq!pWa8Mw z=%n@JtCx;1=U%zLT7K>S`pZ=0)Xwzj8T3s0Eahze8`d}FZ-w68n3JEoH?K4Q^qu9q z=>@li)%RiVcNddCkbTHs;#jI%mR`QQqPOz=CgGy+9whdp4g`BLCvp!8U&;uov(!a2t+bEnRv6HXyi9t`-YglcEo`$K zI8GTZXYLH1F5YE+b^&9-c%dfYc~N>X1MygiCdpZ8N*OKLV7W5+5rusvVP$KTgd_E; zV`@J%*flk^Jhjj1)aX9cTQC5ItVZ(2W=FkE;*aH-)|+*kk6SET?pjmWaNEk+>D${o z_#cmV%sNr-bj$gX%QW$m8{|&wA?SI;%go!uC))SCU%7vKz~jI-L0?1Ap^RZ7;i?hG zB3+__P9{WW#uUa@#oavB8Q+`m==5;nXwvwZiR6j1<0+%5!{;8Q^`_s>XwIxTUvlAM z)|rdpmprp=bM$iM@_6#8@((Vr7Q8HcP;{fXs3iGH;8nY8TBRaov}JqcixtC_ZBw07?YBCLI#1vB=rX<|d6)j~ z?!9;SA9XkN4rDD83J6N{$`!z{xG&lW}=KCd6md=WHe zF)la3F!5t@`sLkMS6?Sg5vR3gcxTbGOK%>(y*_twKH{Cjg64anMViI^4{J-a%g0=3|@n*5+(H4=G;Z`Bm z0XDw2UUnY#t`5ZG&WObDFO_)C zCe0{aEki1k_dNXt+=U-mA1_W_8p^(%Qj|@Mb z9sM+h7-yIepVWIvd=>Y)XzKR#)XeT1jH zI8-@&65hs?W6g0$Tn9b?K9MevmJ{6JljSOT6GbGYHWfM5G<6M41g#z&E8Qx6H$yI? 
z50eHn6Z1ODBi1suSavH8F-{EUJXaTYHjh8AJ|73)7XPq7gt>OirQ5IDz)!g7S$y<#pnvPn` zTCcP(>sag3>W=B<=vx}l7>pa{8`&AN7|$LpGx0noeC)GnyV)so9SefRgyl6WA8Q%w zeVfO&`F8I1(hk7k+3~B6fhW|RD4pIpx4EPekGo2^q1>k2n?25Xx_BviQ+coYJoGK~ zi}SY&kPV~?{2VkK+z^r;>Jw%VE)ao-y@)AN%A4?QY z!X(X~xtpASHaNvFl_z!g+(cSqdP;^mD`$^mG5`i zpn$&+Rk%>pUtCp^dd2Um*){o6wlZ|t=klqF!OHfk>gs};%-W>7nEHr@(CeX%5lwM7 zQg7xp*S7SwzHLLbOLn+*Uc0?`NAB*$d)wWCJsW)~{h|X4gV%@BpPU*_8L1qd8t0!( zdySmVd!st{bK%K{=9Rj&=Ffv)KX1|hFxkC)82{hg(&3(fkq6-NB>?O?0kGBtAd?QJ zm0$~|LIBLj0I*U5i1iA9XzK$|?dCuG2lOlFq=GX}9v}f{nuc(O=>uZH1yBw;!3bD_ zU{(i`gLA_m=mOLPjX+-zbO8W#QsA+O&>1m7Uxak_`<>>nu%o*kx!T2DqomQ{`*59GHMHWa@qZ7S~^!Kl)z@vEz7SZjuAWovinywxMoS2FN7 zEH|1t%4A}H?2754xrD_j%Moi{n>gE7_6iP##}7_;J59Lg5Ifz(-D^B~y{dc!eQ)?H z1`GsQ2d{)Cgfm98MOmHv9&;s5@6?xs(nO0hxa6LcxN|CLdl`M_GqP+i31t7w9nHU9 zkY40hVt!S*RG^%pl2DDR1@+)Ms)_U_Lks^c#r9*J-d)LeEAIFAEIl9{kQ}rbihXiz zxOZfJbZ?wtQtXx5l+ld&8>=~scSi5kK8P(dtn9DO{nh=s_)Emb(M`^+uiKA)7VrA) zEB#tO5ODlSVZM$P@WWh#2Fx+Iz|6u~m`%6|24UXdCqxG`1g0=2kOkd@#-Q&AR(P%P zMdTpvAy(jBM;jT2tUyk{D~~EF3{{U>K(nFk;T(JdLx-`&6l3PF0@xsI7Y>87!d2q7 z@J9GD{0|aKlAELyq`{in5#@A}YP&ZEYQ#XH-V)Gsvv6_^~14ao?j4lj=6k7|w9iW!UZJhhvUlPHq(FxfQ) zq?V>>q`%8dxgeZ1aw#H*HTOZjUjc35y<*QR6jwV-iRB~}tyPXS=-S45n}+?ysv9OZ zzqJ(K(rR1j$hs}xHG4PtzG(M&@2Lj@{VyISJQ5#z^W@U7{hV|l=i6Vte3RLV-yYuK+dKCw{z!laG%#N$3ABJM%p<0O zYA^skKqQbP%m$r-WBwLFh0ujLomRwONMWQ8vL5*f<`CmhgJ?Rm2f718hVj63W7)9r z*mpQXTq~XnpG|@xNg&xFjU_!Gq>|CVvs#J#1w}9=HDxE2J2egUAWZ`85!yYvKKcv> zJ4PYKJ*G+KW|m8=VQlv7TJY|}%00wyKDli~41a=UN19Bb{{JVSQ=?d&3H&&qviwE*<+| zre!9^?4cDF}{Txa*#Kx+jZQvyZXwvVVG@WYFu7)G)>HwaCho zPBE;pGpDX4cqED@Z6)`nTsY^LE}F4-ek7|Lj+#LpTmF}Vfuf?4z^j_2v}GSEI;v7@ ztn0YySFg7=Mcq_r{?^*qM(m*I?Cd&z=li|$-7G!jeOwO;25=992SX5MzsmCeV$vtN*Wk9q%cvGzm6 zlGZYQ`Nc~9M~79`)tR-DzwAEIeH!_EZe4SI`^$~5?i-97Prt=)N^Q<3ePg@o zht*Hi&(|HuI*eO3a z*sFk(4fq>KkN@xQ6^F(cm~$_2K14li9;XkV|9<@!M&f%8Nam8p00009a7bBm000XU z000XU0RWnu7ytkil}SWFRCodHT?u#;Rkr@KbUNvfeG_5`YY-wNfPp{+o{ADgGcxep z5O;8ydCWk3pWowCbe1RjK4lzy;4&jKqk}U-a1=+ud7z@;LLwlFC>S)v1jwFrI_XY2 
zop;WyuIf%_F~x?x|CCgE~7q5lBOq0>MKUdH^|7ARquk zTn+*P5DlHMG@8ELxbaVWHf?&T znHpfF&E_pZ&^rD;1;7qozi0Q$(`V)7{8<+kI>wdbHk%E>!9AN2eO+^{$KB)hHtVU6 z4;0@%KYw`%{kM%aj|)L>`1``u*EM%B_Ep|f_7iHT~t6&rZsneaT;XVt##n z3*O&%0=#!k4Gq$@x_XoAC663)d$?Wm=UXTrha?_sgD)BZa!4dhf)W5g$)o+5f!@!6p= z7>#E6lGpa0z~7?)*juclePn!mT$U>W2F?VqT7?}(LqHHhL#3+DoNXk5_#Pb{(lwSP zZ<=X|iSbjYeFoatR`H}3=!RdX3qeSTbc>FTPC&5WKoW3vT<}n4p!jve)Qtntp05&Y$`N~L&mauhNrjZlt#E%Rdnz*4RdA(~WsS0P~4Cker*^h9K3rID79 zAhx!)2_f*-6tD+E@|~5o_HbR*DQEm#fix64W;xPOIEsuwz3>ej`Mg}wlx+M?%^s;7 zt7<_1|D+24j|zb6{d*Duo)R*nQ%A&N`m}UK6}Gim#oV|jr-^I5{&3u6Y!z0&JjK=N zf~iA{0UNr_&1RH*=FkdaRxmwXu@ih1pW6b!KwO1@&&hNBf0 z=VYU~zns|bF>|Ig{pE8Oi&e4q8Sf>;d>$HnJ*g4^2E{@!BWJXj|MK2>t{)#4iCiKM z_X3_Wd3!22SVWGECF_5t9Wx1ebdVe1IRabo*K&Me+mp(08G`jsI~A7O*rz=A?*I(Ym_y4*ZBHj<`2EIL z@XCfeuGtW8G6RGFlFM<@CjE-OtU#5a;0kB%yXw(N%<3n(~sBeG(H{~)Y9EAyo%kT#Rg2j zpdOnacnjrpoDswQL%S&=xD)LJZ^c?^7~tUKxVSW2U-+UJ`I8c2{Q|sd4FLUcTr-0M zaqMa26wFKpz7U~s3AlNV^qhrHMbm9<`9gTLcVV_VCkYcW$bp+1aV?*4j`n;5NQvl5P$NHC1)DVqF ze?14Uta}S5dTDmrRR#Fn;tPAZ>c6M&cw`%zt17X5(`x+mXPZPMYENh$xHA{IIn#Q& z^ zG}YF_5*3HIuofIEDMeLB1jc8M#;C+D(d52>)gx`#@~i9ZqkAV_+e~x*&R~QFvHtHw zX=O8P?QIyJ9Ss9*B|&g;0hMp z3Alm-uHb+xn7Ts16&!E{`__2XkJh+p1UhOAxPk+&;D9SQ;0g}7f`^~4p*Mp`Hum_uHM8Ep9TllPO>m-^Cs zpVwg1bK6i`-w1z*2vDs7WXVaJJHyU=rk@Vk3#W^iKzdl}7D4^3u#E2B8*>%rGlt8u z5=Bg)^vMF>N2OW-kTeo=C=#;#Uwg6hiz=At%UPznGuZL$9uX3jIcgXzEoL+}ne7De zePX!NLIZ__1sfvpaY5fTR( zUH5HKQ7-^w@TCk-ATqS$+;^2Y-9Yg{p~En8>~LcE&~OCN2SO-y!qgT7qsff0kWR!$ z^D81!lBm$TfXL;}=Y9YJK+SF{!{d*=}ZDsk}pA}{0WdF3_)n|T5 zFNK7P(SF;zrP#jx9qieE2>F-K@p;gyHGt(@rI_!hEt)McpP}lbFn3v=a0JCAI=-Ld z^HfmLKw}#PgVO)j-n&3BpR3@}{)WrPilHHGIK3w22T8R6=u<`rMwjnBh~jFy5zt}A zN81hv!KkMXNNPDnh1mq7H@>uwma1@k3;2!wtQCOj+9tn%uigkWBw{AL|5)BofhX2& zA+XZ302%fCsUzg9CimQPVv`f;C6O8|{n>ML#6sZcPqU_9DPe!$!>g7coyleK6R!5=0O9Kit+4(r(6 ziv6QJ8-P(X4Sa3SakRGjFIv?a0G4_jZD3}d!^RD-cH>&cq5?d2jrKkeAp_;!Ur#;& z9W7Y4e9epUX=T6m-g%gom8l&2YDT>Vpn#D2K2TLOYC9;D1)wkDRn>N#8T3J_^Lk0W 
z2GEDo5^3Wxdgdfd9w7&WOIUcVywJ$#^9sz{H)rNATQUdN%*}+3f?}K#TL)6Cfb&`3 z%&Qjw3IaWJ_$1z;4dDsM&%YQ~=42pUgopbkSWmW!9lu+5e2Bl(Hp~!=)psw#l#5d7 z<59t4!9`Er%bRtn7l4p3WRMY9&31sf7Q0{HC$^-K>G(;07G_Pk5PmWfQbk{$>nD;C z$aX+;iw(co_@<~Qn^p+B=a%_MiWA>XQ&sn1{z<(6(1#*dufHEF>#Fe8m!&8!F2%dw zHlg}-8UFYJZG<8tdn)d^eHPNC3G-m$^7_440RBMV3*u1l6Q_-MckXuK!rmQ$k)#dR$sG z@^U71!@qOSF|2)@pOpG;Qm+AE#NKTmpy<6aRJ-8I$ex7UR10>zRSMI&Dx4*+aC%oe z$>ksZdHCl3@33X-u5M#~!F>8s>bP;(@Z1iZ5DQ57E(pe>^RmdH=2Rkv1Y;;r0f4a|kUQI?AO7tZbEf zJ(*E203jiWBR5FKRnt*$=_L9l06hS)bRb+XpPQ(|6)W>G1u?i-W6WoCJgUlRkTWYJ9y;~2lKhQP~5|72z2_#^8q&npdI^OKWZnM4)jd~lxFIKK%PKOm(9u+`!IG4P>PAtq9@Rh0JE!{0DuH! zkK`y|6ZXDM&ju*fYcM2?dkd?0BQd?AvKl9=rI$l^%Bzo%82pwp_ z3!t@d`N^j}MPee&>2}gr!FRvB)4o^~UCPYDMfxiI>b@c+MsVI_ZG?n%#SdILF9)yD z8iBv~&32h6$j=)^`5;_--)1F7aK==Pycf`JwRRcIa&EjD`NGhX@h9M+TM4YCmA;oJ zrO3=nv3MeD1n(z%`&dZj&7(JU#eehVv~0XE^yJ%^arZ3+;^s6cinJi_LRv*8MlRsh z{Xp^er2%-zvwii|iPQND<~cxwB;)S&_u$&{D%8_7aQMh%>8YP30yAe!z=De>;j*0J zN>6b7(K|VAAJyy)=J$-BZpMp7n5{I{+sN@1<}jm{UYm<6az zC)2KLBDKeY!To$ha&qG2BZqfAotPNM^BbQ^H8u4$*;5z(vZ|_v=c1LgH4&aJ8cR)s zhZ25=_;#ffO9d0sLd30K^&jiDoI6+3R|Htse-FYDw`bL=buUu;*yY6jR@v$9iMtOO z{Jm)a77X@ba%$f%7edh>l!!{woQDqvAyLn?wOiY*$B%zo zv32X~pEWczvH$rLZ56cfy6vr`0a$epDA9d}4E`PkfT>4BU?%e$j!CrfB%e1P1~}M{ zuQ8DZRRHLI>|J6XE5CNbPoY`u^Tv~L_DESt0J@K9biv&;RPgs@1TwMtC4bqg&n_U& z^RqpU@fmCZV8(Krcxd8Db|Y=v9v+%_sqO*ye5%7a4GH|cY5=AL^#T?U?(IAraOf}Z znfd(s?_l?Sx}{(;kM%5!ES&ry9?r8?uz9NYQ(Ynr1^j&q08@d8z|&jaWMSaE-1`Sx z2*lKk?$1KN8*2mJGw(g3`l+riN$dE3Q~;P7LCd=wx?7hW&8J3pu z_e%g|LIn2Oqk!C_wTCQ#s9zKa2tdEcq}@UR0njdQ`-LnZ0R1A9b_)drK)bx{7qWl= z^ovZ|Eff#{?eex?$N~b;FEVMjP(T2*%iDe-`+v|7m{y$1dn*6{002ovPDHLkV1lnB B5rhB$ diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/tensorflow-nn.txt b/docs/haddock/tensorflow-nn-0.1.0.0/tensorflow-nn.txt deleted file mode 100644 index 041f9a8..0000000 --- a/docs/haddock/tensorflow-nn-0.1.0.0/tensorflow-nn.txt +++ /dev/null @@ -1,40 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - 
- --- | Friendly layer around TensorFlow bindings. --- --- Please see README.md -@package tensorflow-nn -@version 0.1.0.0 - -module TensorFlow.NN - --- | Computes sigmoid cross entropy given logits. --- --- Measures the probability error in discrete classification tasks in --- which each class is independent and not mutually exclusive. For --- instance, one could perform multilabel classification where a picture --- can contain both an elephant and a dog at the same time. --- --- For brevity, let `x = logits`, `z = targets`. The logistic loss is --- --- z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 --- (1 + exp(-x))) + (1 - z) * -log(exp(-x) (1 + exp(-x))) = z * --- log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * --- log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + --- log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) --- --- For x < 0, to avoid overflow in exp(-x), we reformulate the above --- --- x - x * z + log(1 + exp(-x)) = log(exp(x)) - x * z + log(1 + exp(-x)) --- = - x * z + log(1 + exp(x)) --- --- Hence, to ensure stability and avoid overflow, the implementation uses --- this equivalent formulation --- --- max(x, 0) - x * z + log(1 + exp(-abs(x))) --- --- logits and targets must have the same type and --- shape. -sigmoidCrossEntropyWithLogits :: (MonadBuild m, OneOf '[Float, Double] a, TensorType a, Num a) => Tensor Value a -> Tensor Value a -> m (Tensor Value a) diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/LICENSE b/docs/haddock/tensorflow-opgen-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-ParsedOp.html b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-ParsedOp.html index 1fd0479..f032041 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-ParsedOp.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-ParsedOp.html @@ -1,10 +1,10 @@ -TensorFlow.OpGen.ParsedOp

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.OpGen.ParsedOp

                                Description

                                This module helps parse the proto OpDef into a Haskell type which is more +

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.OpGen.ParsedOp

                                Description

                                This module helps parse the proto OpDef into a Haskell type which is more descriptive of how the attributes and arguments will be used in the - generated code.

                                Documentation

                                data ParsedOp

                                Constructors

                                ParsedOp 

                                Fields

                                parsedOpName :: Name
                                 
                                parsedOpSummary :: Text
                                 
                                parsedOpDescription :: Text
                                 
                                parsedInputs :: [ParsedArg]
                                 
                                parsedOutputs :: [ParsedArg]
                                 
                                explicitInputAttrs :: [Attr AttrType]

                                Attributes that must be set explicitly when creating the op. - Associated with the type of the attribute.

                                inferredTypeAttrs :: [Attr TypeParam]

                                Attributes that are type parameters.

                                inferredListSizeAttrs :: [Attr (NonEmpty Name)]
                                 
                                parsedOpIsMonadic :: Bool

                                Whether this op is stateful or takes a stateful input. Such ops + generated code.

                                Documentation

                                data ParsedOp Source #

                                Constructors

                                ParsedOp 

                                Fields

                                data Name

                                Constructors

                                Name 

                                newtype HaskellName

                                A name that's appropriate for a variable in a Haskell source file.

                                Constructors

                                HaskellName 

                                Fields

                                unHaskellName :: Text
                                 

                                newtype TFName

                                A raw name as specified in the OpDef proto.

                                Constructors

                                TFName 

                                Fields

                                unTFName :: Text
                                 

                                Instances

                                data Attr a

                                A named attribute, associated with some information about it.

                                Constructors

                                Attr 

                                Fields

                                attrName :: Name
                                 
                                attrDescription :: Text
                                 
                                attrInfo :: a
                                 

                                data AttrType

                                The type of an attribute.

                                Instances

                                data TypeParam

                                Constructors

                                TypeParam 

                                Fields

                                typeParamIsList :: Bool
                                 
                                typeParamRestrictions :: Maybe (NonEmpty DataType)

                                The list of allowed types (see: TensorFlow.Types.OneOf). - If Nothing, then any type is acceptable.

                                data ParsedArg

                                An input or output argument (Tensor) for an op.

                                data ParsedArgCase

                                Constructors

                                SimpleArg 
                                ListArg 

                                Fields

                                argLength :: Name

                                The attribute that specifies this list's length.

                                argType :: ArgType
                                 
                                argCaseKind :: ArgKind
                                 
                                MixedListArg

                                A heterogeneous list.

                                ResourceArg 

                                data ArgType

                                The type of an argument.

                                Constructors

                                ArgTypeFixed DataType

                                A fixed type.

                                ArgTypeAttr Name

                                A type that depends on an attribute.

                                camelCase :: Text -> Text

                                \ No newline at end of file + Build action).

                                data Name Source #

                                Constructors

                                Name 

                                newtype HaskellName Source #

                                A name that's appropriate for a variable in a Haskell source file.

                                Constructors

                                HaskellName 

                                Fields

                                newtype TFName Source #

                                A raw name as specified in the OpDef proto.

                                Constructors

                                TFName 

                                Fields

                                Instances

                                data Attr a Source #

                                A named attribute, associated with some information about it.

                                Constructors

                                Attr 

                                Fields

                                data AttrType Source #

                                The type of an attribute.

                                Instances

                                data TypeParam Source #

                                Constructors

                                TypeParam 

                                Fields

                                data ParsedArg Source #

                                An input or output argument (Tensor) for an op.

                                data ParsedArgCase Source #

                                Constructors

                                SimpleArg 
                                ListArg 

                                Fields

                                MixedListArg

                                A heterogeneous list.

                                data ArgType Source #

                                The type of an argument.

                                Constructors

                                ArgTypeFixed DataType

                                A fixed type.

                                ArgTypeAttr Name

                                A type that depends on an attribute.

                                camelCase :: Text -> Text Source #

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html index b60dd5c..6a7fc07 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html @@ -1,14 +1,13 @@ -TensorFlow.OpGen

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.OpGen

                                Description

                                Rendering of TensorFlow operations as Haskell functions.

                                The basic type signature generated for each op is:

                                {constraints} => {mandatory attrs} -> {input tensors} -> {output tensors}

                                where:

                                • {mandatory attrs} is of the form A_1 -> ... -> A_N, where each A is an +

                                  tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                  Safe HaskellNone
                                  LanguageHaskell2010

                                  TensorFlow.OpGen

                                  Description

                                  Rendering of TensorFlow operations as Haskell functions.

                                  The basic type signature generated for each op is:

                                  {constraints} => {mandatory attrs} -> {input tensors} -> {output tensors}

                                  where:

                                  • {mandatory attrs} is of the form A_1 -> ... -> A_N, where each A is an op attribute that doesn't have a default and can't be inferred from other inputs.
                                  • {constraints} restrict the type parameters of the input and output tensors (for example: TensorType or OneOf).
                                  • {input tensors} is of the form T_1 -> ... -> T_N, where each T is of -the form Tensor Ref a, Tensor v a or ResourceHandle (or a list of one -of those types), and a is either a concrete type or a (constrained) type -variable.
                                  • {output tensors} is of the form (T_1,...,T_N) for "pure" ops, and +the form Tensor Ref a or Tensor v a (or a list of one of those types), +and a is either a concrete type or a (constrained) type variable.
                                  • {output tensors} is of the form (T_1,...,T_N) for "pure" ops, and Build (T_1,...,T_N) for "stateful" ops. An op is considered "stateful" if -it takes a Tensor Ref or ResourceHandle as input, or if it's explicitly -marked "Stateful" in its REGISTER_OP definition. (If there are no outputs, -it is either ControlNode or Build ControlNode.)

                                  Documentation

                                  \ No newline at end of file +it takes a Tensor Ref or Tensor v ResourceHandle as input, or if it's +explicitly marked "Stateful" in its REGISTER_OP definition. (If there +are no outputs, it is either ControlNode or Build ControlNode.)

                                Documentation

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html index d7362a3..04dd45e 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations. (Index)

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                Index

                                argCaseKindTensorFlow.OpGen.ParsedOp
                                ArgKindTensorFlow.OpGen.ParsedOp
                                argKindTensorFlow.OpGen.ParsedOp
                                argLengthTensorFlow.OpGen.ParsedOp
                                ArgSomeTensorTensorFlow.OpGen.ParsedOp
                                ArgTensorBuildTensorFlow.OpGen.ParsedOp
                                ArgTensorRefTensorFlow.OpGen.ParsedOp
                                ArgTensorValueTensorFlow.OpGen.ParsedOp
                                ArgTypeTensorFlow.OpGen.ParsedOp
                                argTypeTensorFlow.OpGen.ParsedOp
                                ArgTypeAttrTensorFlow.OpGen.ParsedOp
                                argTypeAttrTensorFlow.OpGen.ParsedOp
                                ArgTypeFixedTensorFlow.OpGen.ParsedOp
                                Attr 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                AttrBaseTypeTensorFlow.OpGen.ParsedOp
                                AttrBoolTensorFlow.OpGen.ParsedOp
                                AttrBytesTensorFlow.OpGen.ParsedOp
                                attrDescriptionTensorFlow.OpGen.ParsedOp
                                AttrFloatTensorFlow.OpGen.ParsedOp
                                attrInfoTensorFlow.OpGen.ParsedOp
                                AttrInt64TensorFlow.OpGen.ParsedOp
                                AttrListTensorFlow.OpGen.ParsedOp
                                attrNameTensorFlow.OpGen.ParsedOp
                                AttrShapeTensorFlow.OpGen.ParsedOp
                                AttrSingleTensorFlow.OpGen.ParsedOp
                                AttrTensorTensorFlow.OpGen.ParsedOp
                                AttrType 
                                1 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                2 (Type/Class)TensorFlow.OpGen.ParsedOp
                                camelCaseTensorFlow.OpGen.ParsedOp
                                docOpListTensorFlow.OpGen
                                excludeListTensorFlow.OpGen
                                explicitInputAttrsTensorFlow.OpGen.ParsedOp
                                flagParserTensorFlow.OpGen
                                HaskellName 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                haskellNameTensorFlow.OpGen.ParsedOp
                                inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
                                inferredTypeAttrsTensorFlow.OpGen.ParsedOp
                                ListArgTensorFlow.OpGen.ParsedOp
                                MixedListArgTensorFlow.OpGen.ParsedOp
                                Name 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                OpGenFlags 
                                1 (Type/Class)TensorFlow.OpGen
                                2 (Data Constructor)TensorFlow.OpGen
                                outputFileTensorFlow.OpGen
                                ParsedArg 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                ParsedArgCaseTensorFlow.OpGen.ParsedOp
                                parsedArgCaseTensorFlow.OpGen.ParsedOp
                                parsedArgDescriptionTensorFlow.OpGen.ParsedOp
                                parsedArgNameTensorFlow.OpGen.ParsedOp
                                parsedInputsTensorFlow.OpGen.ParsedOp
                                ParsedOp 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                parsedOpDescriptionTensorFlow.OpGen.ParsedOp
                                parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
                                parsedOpNameTensorFlow.OpGen.ParsedOp
                                parsedOpSummaryTensorFlow.OpGen.ParsedOp
                                parsedOutputsTensorFlow.OpGen.ParsedOp
                                parseOpTensorFlow.OpGen.ParsedOp
                                prefixTensorFlow.OpGen
                                ResourceArgTensorFlow.OpGen.ParsedOp
                                SimpleArgTensorFlow.OpGen.ParsedOp
                                TFName 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                tfNameTensorFlow.OpGen.ParsedOp
                                TypeParam 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                typeParamIsListTensorFlow.OpGen.ParsedOp
                                typeParamRestrictionsTensorFlow.OpGen.ParsedOp
                                unHaskellNameTensorFlow.OpGen.ParsedOp
                                unTFNameTensorFlow.OpGen.ParsedOp
                                \ No newline at end of file +

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                Index

                                ArgKindTensorFlow.OpGen.ParsedOp
                                argKindTensorFlow.OpGen.ParsedOp
                                argLengthTensorFlow.OpGen.ParsedOp
                                ArgSomeTensorTensorFlow.OpGen.ParsedOp
                                ArgTensorBuildTensorFlow.OpGen.ParsedOp
                                ArgTensorRefTensorFlow.OpGen.ParsedOp
                                ArgTensorValueTensorFlow.OpGen.ParsedOp
                                ArgTypeTensorFlow.OpGen.ParsedOp
                                argTypeTensorFlow.OpGen.ParsedOp
                                ArgTypeAttrTensorFlow.OpGen.ParsedOp
                                argTypeAttrTensorFlow.OpGen.ParsedOp
                                ArgTypeFixedTensorFlow.OpGen.ParsedOp
                                Attr 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                AttrBaseTypeTensorFlow.OpGen.ParsedOp
                                AttrBoolTensorFlow.OpGen.ParsedOp
                                AttrBytesTensorFlow.OpGen.ParsedOp
                                attrDescriptionTensorFlow.OpGen.ParsedOp
                                AttrFloatTensorFlow.OpGen.ParsedOp
                                attrInfoTensorFlow.OpGen.ParsedOp
                                AttrInt64TensorFlow.OpGen.ParsedOp
                                AttrListTensorFlow.OpGen.ParsedOp
                                attrNameTensorFlow.OpGen.ParsedOp
                                AttrShapeTensorFlow.OpGen.ParsedOp
                                AttrSingleTensorFlow.OpGen.ParsedOp
                                AttrTensorTensorFlow.OpGen.ParsedOp
                                AttrType 
                                1 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                2 (Type/Class)TensorFlow.OpGen.ParsedOp
                                camelCaseTensorFlow.OpGen.ParsedOp
                                docOpListTensorFlow.OpGen
                                excludeListTensorFlow.OpGen
                                explicitInputAttrsTensorFlow.OpGen.ParsedOp
                                flagParserTensorFlow.OpGen
                                HaskellName 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                haskellNameTensorFlow.OpGen.ParsedOp
                                inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
                                inferredTypeAttrsTensorFlow.OpGen.ParsedOp
                                ListArgTensorFlow.OpGen.ParsedOp
                                MixedListArgTensorFlow.OpGen.ParsedOp
                                Name 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                OpGenFlags 
                                1 (Type/Class)TensorFlow.OpGen
                                2 (Data Constructor)TensorFlow.OpGen
                                outputFileTensorFlow.OpGen
                                ParsedArg 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                ParsedArgCaseTensorFlow.OpGen.ParsedOp
                                parsedArgCaseTensorFlow.OpGen.ParsedOp
                                parsedArgDescriptionTensorFlow.OpGen.ParsedOp
                                parsedArgNameTensorFlow.OpGen.ParsedOp
                                parsedInputsTensorFlow.OpGen.ParsedOp
                                ParsedOp 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                parsedOpDescriptionTensorFlow.OpGen.ParsedOp
                                parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
                                parsedOpNameTensorFlow.OpGen.ParsedOp
                                parsedOpSummaryTensorFlow.OpGen.ParsedOp
                                parsedOutputsTensorFlow.OpGen.ParsedOp
                                parseOpTensorFlow.OpGen.ParsedOp
                                prefixTensorFlow.OpGen
                                SimpleArgTensorFlow.OpGen.ParsedOp
                                TFName 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                tfNameTensorFlow.OpGen.ParsedOp
                                TypeParam 
                                1 (Type/Class)TensorFlow.OpGen.ParsedOp
                                2 (Data Constructor)TensorFlow.OpGen.ParsedOp
                                typeParamIsListTensorFlow.OpGen.ParsedOp
                                typeParamRestrictionsTensorFlow.OpGen.ParsedOp
                                unHaskellNameTensorFlow.OpGen.ParsedOp
                                unTFNameTensorFlow.OpGen.ParsedOp
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/frames.html b/docs/haddock/tensorflow-opgen-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-opgen-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-opgen-0.1.0.0/index-frames.html deleted file mode 100644 index 17edefc..0000000 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations. \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/index.html b/docs/haddock/tensorflow-opgen-0.1.0.0/index.html index e140ac3..55791b5 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                Please see README.md

                                \ No newline at end of file +

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

                                Please see README.md

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-ParsedOp.html b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-ParsedOp.html index b7407eb..d578b75 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-ParsedOp.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-ParsedOp.html @@ -1,4 +1,4 @@ -TensorFlow.OpGen.ParsedOp

                                TensorFlow.OpGen.ParsedOp

                                \ No newline at end of file +

                                TensorFlow.OpGen.ParsedOp

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen.html b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen.html index 694d0c0..b0b4eeb 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen.html @@ -1,4 +1,4 @@ -TensorFlow.OpGen

                                TensorFlow.OpGen

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/ocean.css b/docs/haddock/tensorflow-opgen-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ -424,30 
+435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow.OpGen.ParsedOp.html b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow.OpGen.ParsedOp.html new file mode 100644 index 0000000..ea3af95 --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow.OpGen.ParsedOp.html @@ -0,0 +1,344 @@ +
                                -- | This module helps parse the proto OpDef into a Haskell type which is more
                                +-- descriptive of how the attributes and arguments will be used in the
                                +-- generated code.
                                +{-# LANGUAGE LambdaCase #-}
                                +{-# LANGUAGE OverloadedStrings #-}
                                +{-# LANGUAGE RecordWildCards #-}
                                +module TensorFlow.OpGen.ParsedOp
                                +    ( ParsedOp(..)
                                +    , Name(..)
                                +    , HaskellName(..)
                                +    , TFName(..)
                                +    , Attr(..)
                                +    , AttrType(..)
                                +    , AttrBaseType(..)
                                +    , TypeParam(..)
                                +    , ParsedArg(..)
                                +    , ParsedArgCase(..)
                                +    , ArgType(..)
                                +    , ArgKind(..)
                                +    , parseOp
                                +    , camelCase
                                +    ) where
                                +
                                +import Data.Char (toUpper, toLower)
                                +import Data.List (sortBy)
                                +import Data.List.NonEmpty (NonEmpty, nonEmpty)
                                +import Data.Maybe (mapMaybe)
                                +import Data.Monoid ((<>))
                                +import Data.Ord (comparing)
                                +import qualified Data.Set as Set
                                +import Data.Text (Text)
                                +import qualified Data.Text as Text
                                +import Lens.Family2 ((^.))
                                +import Proto.Tensorflow.Core.Framework.AttrValue (list)
                                +import Proto.Tensorflow.Core.Framework.OpDef
                                +    ( OpDef
                                +    , OpDef'ArgDef
                                +    , OpDef'AttrDef
                                +    , allowedValues
                                +    , attr
                                +    , maybe'defaultValue
                                +    , description
                                +    , name
                                +    , inputArg
                                +    , isRef
                                +    , isStateful
                                +    , outputArg
                                +    , summary
                                +    , typeListAttr
                                +    , numberAttr
                                +    , typeAttr
                                +    , type'
                                +    )
                                +import Proto.Tensorflow.Core.Framework.Types (DataType(DT_RESOURCE))
                                +
                                +data ParsedOp = ParsedOp
                                +    { parsedOpName :: Name
                                +    , parsedOpSummary :: Text
                                +    , parsedOpDescription :: Text
                                +    , parsedInputs :: [ParsedArg]
                                +    , parsedOutputs :: [ParsedArg]
                                +    , explicitInputAttrs :: [Attr AttrType]
                                +        -- ^ Attributes that must be set explicitly when creating the op.
                                +        -- Associated with the type of the attribute.
                                +    , inferredTypeAttrs :: [Attr TypeParam]
                                +        -- ^ Attributes that are type parameters.
                                +    , inferredListSizeAttrs :: [Attr (NonEmpty Name)]
                                +        -- Attributes which are list sizes (ints) that are inferred automatically
                                +        -- from one or more of the input tensors.
                                +        -- Associated with the list of tensors whose size it describes.
                                +    , parsedOpIsMonadic :: Bool
                                +        -- ^ Whether this op is stateful or takes a stateful input.  Such ops
                                +        -- should not be CSE'd and must be monadic in our API (i.e., return a
                                +        -- Build action).
                                +    }
                                +
                                +data Name = Name
                                +    { haskellName :: HaskellName
                                +    , tfName :: TFName
                                +    }
                                +
                                +-- | A raw name as specified in the OpDef proto.
                                +newtype TFName = TFName { unTFName :: Text }
                                +    deriving (Eq, Ord)
                                +
                                +-- | A name that's appropriate for a variable in a Haskell source file.
                                +newtype HaskellName = HaskellName { unHaskellName :: Text }
                                +
                                +-- | A named attribute, associated with some information about it.
                                +data Attr a = Attr
                                +    { attrName :: Name
                                +    , attrDescription :: Text
                                +    , attrInfo :: a
                                +    }
                                +
                                +-- | The type of an attribute.
                                +data AttrType = AttrSingle AttrBaseType
                                +                | AttrList AttrBaseType
                                +                deriving Eq
                                +
                                +data AttrBaseType = AttrBytes | AttrInt64 | AttrFloat | AttrBool
                                +                | AttrType | AttrShape | AttrTensor
                                +                deriving Eq
                                +
                                +data TypeParam = TypeParam
                                +    { typeParamIsList :: Bool
                                +    , typeParamRestrictions :: Maybe (NonEmpty DataType)
                                +        -- ^ The list of allowed types (see: TensorFlow.Types.OneOf).
                                +        -- If 'Nothing', then any type is acceptable.
                                +    }
                                +
                                +-- | An input or output argument (Tensor) for an op.
                                +data ParsedArg = ParsedArg
                                +    { parsedArgName :: Name
                                +    , parsedArgDescription :: Text
                                +    , parsedArgCase :: ParsedArgCase
                                +    }
                                +
-- | How an op argument's type and multiplicity were declared.
data ParsedArgCase
    = SimpleArg { argType :: ArgType, argKind :: ArgKind }  -- ^ A single tensor.
    | ListArg
        { argLength :: Name  -- ^ The attribute that specifies this list's length.
        , argType :: ArgType
        , argKind :: ArgKind
        }
    | MixedListArg { argTypeAttr :: Name, argKind :: ArgKind }
        -- ^ A heterogeneous list.
                                +
                                +maybeArgType :: ParsedArgCase -> Maybe ArgType
                                +maybeArgType MixedListArg{} = Nothing
                                +maybeArgType a = Just $ argType a
                                +
-- | The type of an argument.
data ArgType
    = ArgTypeFixed DataType -- ^ A fixed type.
    | ArgTypeAttr Name  -- ^ A type that depends on an attribute.
                                +
-- | The kind of an op input or output (not including the argument type `a`).
data ArgKind
    = ArgTensorRef -- ^ Tensor Ref a
    | ArgTensorValue -- ^ Tensor Value a
    | ArgTensorBuild -- ^ Tensor Build a
    | ArgSomeTensor Text -- ^ Tensor v a; the Text is the variable 'v'.
    deriving (Eq)
                                +
                                +isRefCase :: ParsedArgCase -> Bool
                                +isRefCase a
                                +    | ArgTensorRef <- argKind a = True
                                +    | Just (ArgTypeFixed DT_RESOURCE) <- maybeArgType a = True
                                +    | otherwise = False
                                +
-- | Build a 'Name' from a TensorFlow name: the Haskell side gets its first
-- character lower-cased and a tick appended if it collides with a keyword.
makeName :: Text -> Name
makeName n = Name
    { haskellName = HaskellName $ fixReservedName $ lowCase n
    , tfName = TFName n
    }
                                +
                                +-- | Change a name so it doesn't conflict with any Haskell keywords.
                                +fixReservedName :: Text -> Text
                                +fixReservedName n
                                +    | n `Set.member` reservedKeywords = n <> "'"
                                +    | otherwise = n
                                +
-- | Names that generated identifiers must avoid; see 'fixReservedName'.
reservedKeywords :: Set.Set Text
reservedKeywords = Set.fromList $
    -- Haskell2010 keywords:
    -- https://www.haskell.org/onlinereport/haskell2010/haskellch2.html#x7-180002.4
    -- We don't include keywords that are allowed to be variable names,
    -- in particular: "as", "forall", and "hiding".
    [ "case"
    , "class"
    , "data"
    , "default"
    , "deriving"
    , "do"
    , "else"
    , "foreign"
    , "if"
    , "import"
    , "in"
    , "infix"
    , "infixl"
    , "infixr"
    , "instance"
    , "let"
    , "module"
    , "newtype"
    , "of"
    , "then"
    , "type"
    , "where"
    ]
    ++  -- Nonstandard extensions
    [ "mdo"   -- RecursiveDo
    , "rec"   -- Arrows, RecursiveDo
    , "proc"  -- Arrows
    ]
                                +
                                +-- | Lower-case the given text.
                                +lowCase :: Text -> Text
                                +lowCase = forceCase toLower
                                +
                                +forceCase :: (Char -> Char) -> Text -> Text
                                +forceCase convert s = maybe "" (\(c, cs) -> Text.cons (convert c) cs)
                                +                      (Text.uncons s)
                                +
                                +camelCase :: Text -> Text
                                +camelCase s = Text.concat $ map upCase
                                +                          $ Text.splitOn "_" s
                                +
                                +-- | Upper-case the given text.
                                +upCase :: Text -> Text
                                +upCase = forceCase toUpper
                                +
                                +
-- | Convert an OpDef proto into the intermediate form used for rendering.
-- The remaining 'ParsedOp' fields are filled in by RecordWildCards ('..')
-- from the bindings in the where-clause below.
parseOp :: OpDef -> ParsedOp
parseOp o = ParsedOp
    { parsedOpName = makeName $ o ^. name
    , parsedOpSummary = o ^. summary
    , parsedOpDescription = o ^. description
    , ..
    }
  where
    -- An op is monadic if it's marked stateful, takes a Ref/resource
    -- input, or has no outputs.
    parsedOpIsMonadic = o ^. isStateful
                    || any (isRefCase . parsedArgCase) parsedInputs
                    || null (o ^. outputArg)
    parsedInputs = zipWith (\t a -> parseArg a (inputTensorKind t a))
                                        tensorKindParams (o ^. inputArg)
    -- An infinite supply of fresh type-variable names: v'1, v'2, ...
    tensorKindParams = ["v'" <> Text.pack (show x) | x <- [1::Integer ..]]
    parsedOutputs = map (\a -> parseArg a (outputTensorKind parsedOpIsMonadic a))
                        (o ^. outputArg)
    -- Integer attributes that can be inferred from the size of at least one
    -- input list.
    inferredListSizeAttrs = mapMaybeAttrs (getInferredListSizeAttr parsedInputs)
                                $ o ^. attr
    -- Attributes whose values are determined by the inputs, so they are
    -- not exposed as explicit function parameters.
    implicitAttrs = Set.fromList $ map tfName $
                        map attrName inferredTypeAttrs
                            ++ map attrName inferredListSizeAttrs
    inferredTypeAttrs = mapMaybeAttrs (getInferredTypeAttr argTypeParams) $ o ^. attr
    -- Names of the attributes used as a type parameter by some input/output.
    argTypeParams = Set.fromList $ map tfName $
                        mapMaybe (getArgTypeParam . parsedArgCase) $
                            parsedInputs ++ parsedOutputs
    -- Attributes that can't be inferred and don't have defaults, so must be
    -- passed as separate arguments to the op.
    explicitInputAttrs = sortBy (comparing (tfName . attrName))
                        $ mapMaybeAttrs (getExplicitInputAttr o implicitAttrs)
                        $ o ^. attr
                                +
                                +-- TODO(judahjacobson): Some arguments should be refs.
                                +inputTensorKind :: Text -> OpDef'ArgDef -> ArgKind
                                +inputTensorKind v a
                                +    | a ^. isRef = ArgTensorRef
                                +    | otherwise = ArgSomeTensor v
                                +
                                +outputTensorKind :: Bool -> OpDef'ArgDef -> ArgKind
                                +outputTensorKind isMonadic a
                                +    | a ^. isRef = ArgTensorRef
                                +    | isMonadic = ArgTensorValue
                                +    | otherwise = ArgTensorBuild
                                +
-- | Whether the attribute must be passed as an explicit function argument:
-- it is not inferred from inputs, has no default value, and has one of the
-- types we support as explicit parameters.
getExplicitInputAttr :: OpDef -> Set.Set TFName -> OpDef'AttrDef -> Maybe AttrType
getExplicitInputAttr o implicitAttrs a
    | TFName (a ^. name) `Set.notMember` implicitAttrs
    , a ^. maybe'defaultValue == Nothing
    , t <- parseAttrType o (a ^. type')
    , t `elem` map AttrSingle
                    [AttrBool, AttrInt64, AttrFloat, AttrType, AttrShape]
                ++ [AttrList AttrType] = Just t
    | otherwise = Nothing
                                +
                                +getInferredTypeAttr :: Set.Set TFName -> OpDef'AttrDef -> Maybe TypeParam
                                +getInferredTypeAttr argTypeParams a
                                +    | TFName (a ^. name) `notElem` argTypeParams = Nothing
                                +    | a ^. type' == "type" = Just $ TypeParam False allowed
                                +    | a ^. type' == "list(type)" = Just $ TypeParam True allowed
                                +    | otherwise = Nothing
                                +  where
                                +    allowed = nonEmpty (a ^. allowedValues . list . type')
                                +
                                +getArgTypeParam :: ParsedArgCase -> Maybe Name
                                +getArgTypeParam SimpleArg { argType = ArgTypeAttr n} = Just n
                                +getArgTypeParam ListArg { argType = ArgTypeAttr n} = Just n
                                +getArgTypeParam MixedListArg { argTypeAttr = n } = Just n
                                +getArgTypeParam _ = Nothing
                                +
-- | If this "int" attribute is used as the length of one or more input
-- lists, return those inputs' names (so the attribute's value can be
-- inferred from their lengths); otherwise 'Nothing'.
getInferredListSizeAttr :: [ParsedArg] -> OpDef'AttrDef -> Maybe (NonEmpty Name)
getInferredListSizeAttr inputs a
    | a ^. type' == "int"
        = nonEmpty [t | ParsedArg { parsedArgName = t
                                  , parsedArgCase
                                        = ListArg { argLength = n }
                                  } <- inputs
                      , TFName (a ^. name) == tfName n]
    | otherwise = Nothing
                                +
                                +-- | Like mapMaybe, but associates the attribute name/description with the given info.
                                +mapMaybeAttrs :: (OpDef'AttrDef -> Maybe a) -> [OpDef'AttrDef] -> [Attr a]
                                +mapMaybeAttrs f = mapMaybe $ \a -> do
                                +                            x <- f a
                                +                            Just Attr
                                +                                { attrName = makeName (a ^. name)
                                +                                , attrDescription = a ^. description
                                +                                , attrInfo = x
                                +                                }
                                +
-- | Convert an OpDef argument proto into a 'ParsedArg' with the given kind.
parseArg :: OpDef'ArgDef -> ArgKind -> ParsedArg
parseArg a tKind = ParsedArg
    { parsedArgName = makeName (a ^. name)
    , parsedArgDescription = a ^. description
    , parsedArgCase = parseArgCase a tKind
    }
                                +
-- | Classify how an argument was declared: a heterogeneous list (via
-- type_list_attr), a homogeneous list (via number_attr), or a single tensor.
parseArgCase :: OpDef'ArgDef -> ArgKind -> ParsedArgCase
parseArgCase a tKind
    | Just n <- maybeAttr (a ^. typeListAttr) = MixedListArg n tKind
    | Just n <- maybeAttr (a ^. numberAttr) = ListArg n thisArgType tKind
    | otherwise = SimpleArg thisArgType tKind
  where
    -- The element type: named by an attribute, or fixed in the OpDef.
    thisArgType
        | Just n <- maybeAttr (a ^. typeAttr) = ArgTypeAttr n
        | otherwise = ArgTypeFixed (a ^. type')
    -- Proto string fields use "" to mean "unset".
    maybeAttr :: Text -> Maybe Name
    maybeAttr "" = Nothing
    maybeAttr t = Just $ makeName t
                                +
-- | Map the OpDef's attribute type string to an 'AttrType'; dies on an
-- unrecognized string, naming the offending op in the error message.
parseAttrType :: OpDef -> Text -> AttrType
parseAttrType o = \case
    "string" -> AttrSingle AttrBytes
    "int" -> AttrSingle AttrInt64
    "float" -> AttrSingle AttrFloat
    "bool" -> AttrSingle AttrBool
    "type" -> AttrSingle AttrType
    "shape" -> AttrSingle AttrShape
    "tensor" -> AttrSingle AttrTensor
    "list(string)" -> AttrList AttrBytes
    "list(int)" -> AttrList AttrInt64
    "list(float)" -> AttrList AttrFloat
    "list(bool)" -> AttrList AttrBool
    "list(type)" -> AttrList AttrType
    "list(shape)" -> AttrList AttrShape
    "list(tensor)" -> AttrList AttrTensor
    t -> error $ "parseAttrType: unrecognized type " ++ show t
              ++ " for op " ++ show (o ^. name)
                                +
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow.OpGen.html b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow.OpGen.html new file mode 100644 index 0000000..bce0340 --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow.OpGen.html @@ -0,0 +1,453 @@ +
                                -- Copyright 2016 TensorFlow authors.
                                +--
                                +-- Licensed under the Apache License, Version 2.0 (the "License");
                                +-- you may not use this file except in compliance with the License.
                                +-- You may obtain a copy of the License at
                                +--
                                +--     http://www.apache.org/licenses/LICENSE-2.0
                                +--
                                +-- Unless required by applicable law or agreed to in writing, software
                                +-- distributed under the License is distributed on an "AS IS" BASIS,
                                +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                                +-- See the License for the specific language governing permissions and
                                +-- limitations under the License.
                                +
                                +{-# LANGUAGE CPP #-}
                                +{-# LANGUAGE FlexibleContexts #-}
                                +{-# LANGUAGE LambdaCase #-}
                                +{-# LANGUAGE OverloadedStrings #-}
                                +{-# LANGUAGE TypeFamilies #-}
                                +{- | Rendering of TensorFlow operations as Haskell functions.
                                +
                                +The basic type signature generated for each op is:
                                +
                                +> {constraints} => {mandatory attrs} -> {input tensors} -> {output tensors}
                                +
                                +where:
                                +
                                +* @{mandatory attrs}@ is of the form @A_1 -> ... -> A_N@, where each @A@ is an
                                + op attribute that doesn't have a default and can't be inferred from other
                                + inputs.
                                +
                                +* @{constraints}@ restrict the type parameters of the input and output tensors
                                + (for example: 'TensorType' or 'OneOf').
                                +
                                +* @{input tensors}@ is of the form @T_1 -> ... -> T_N@, where each @T@ is of
                                +the form @Tensor Ref a@ or @Tensor v a@ (or a list of one of those types),
                                +and @a@ is either a concrete type or a (constrained) type variable.
                                +
                                +* @{output tensors}@ is of the form @(T_1,...,T_N)@ for "pure" ops, and
                                +@Build (T_1,...,T_N)@ for "stateful" ops.  An op is considered "stateful" if
                                +it takes a @Tensor Ref@ or @Tensor v ResourceHandle@ as input, or if it's
                                +explicitly marked \"Stateful\" in its @REGISTER_OP@ definition.  (If there
                                +are no outputs, it is either @ControlNode@ or @Build ControlNode@.)
                                +-}
                                +
                                +module TensorFlow.OpGen
                                +  ( OpGenFlags(..)
                                +  , docOpList
                                +  , flagParser)
                                +  where
                                +
                                +import Data.Foldable (toList)
                                +import Data.Maybe (fromMaybe)
                                +import Data.ProtoLens (def, showMessage)
                                +import Data.List (sortOn)
                                +import Data.List.NonEmpty (NonEmpty)
                                +import qualified Data.List.NonEmpty as NE
                                +import Lens.Family2 ((^.), (.~), (&), view)
                                +import Options.Applicative (Parser, help, long, strOption, value)
                                +import Proto.Tensorflow.Core.Framework.OpDef
                                +  ( OpList
                                +  , OpDef
                                +  , attr
                                +  , inputArg
                                +  , name
                                +  , op
                                +  , outputArg
                                +  )
                                +import Proto.Tensorflow.Core.Framework.Types (DataType(..))
                                +import System.FilePath (takeBaseName)
                                +import TensorFlow.OpGen.ParsedOp
                                +import Text.PrettyPrint.Mainland
                                +  ( Doc
                                +  , (<>)
                                +  , (<+>)
                                +  , (</>)
                                +  , (<+/>)
                                +  , brackets
                                +  , comma
                                +  , commasep
                                +  , dquotes
                                +  , empty
                                +  , enclose
                                +  , flatten
                                +  , folddoc
                                +  , hang
                                +  , indent
                                +  , parens
                                +  , sep
                                +  , stack
                                +  , strictText
                                +  , tuple
                                +  )
                                +import qualified Data.Set as Set
                                +import qualified Data.Text as Text
                                +
-- | Command-line flags for the op generator; see 'flagParser'.
data OpGenFlags = OpGenFlags
     { outputFile :: String   -- ^ File to write the generated module to.
     , prefix :: String       -- ^ Haskell module-name prefix to use.
     , excludeList :: String  -- ^ Comma-separated op names to ignore.
     }
                                +
-- | optparse-applicative parser for 'OpGenFlags'.
-- --output and --prefix are required; --exclude_list defaults to "".
flagParser :: Parser OpGenFlags
flagParser = OpGenFlags
     <$> strOption (mconcat [ long "output"
                            , help "File to write."
                            ])
     <*> strOption (mconcat [ long "prefix"
                            , help "Haskell package prefix to use"
                            ])
     <*> strOption (mconcat [ long "exclude_list"
                            , value ""
                            , help "Comma separated Ops names to ignore"
                            ])
                                +
                                +
-- | Render a whole generated module for the given OpList: pragmas, module
-- header, imports, then one rendered op (plus extras) per OpDef, sorted by
-- name and with excluded ops filtered out.
docOpList :: OpGenFlags -> OpList -> Doc
docOpList flags opList =
  stack [ "{-# LANGUAGE ConstraintKinds #-}"
        , "{-# LANGUAGE DataKinds #-}"
        , "{-# LANGUAGE FlexibleContexts #-}"
        , "{-# LANGUAGE FlexibleInstances #-}"
        , "{-# LANGUAGE OverloadedStrings #-}"
        , "{-# LANGUAGE ScopedTypeVariables #-}"
          -- Avoids reports about shadowing standard library names.
        , "{-# OPTIONS_GHC -fno-warn-name-shadowing #-}"
          -- eqLengthGuard never returns false and dies instead.
        , "{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}"
        , "module" <+> strictText moduleName <+> "where"
        , empty
        , imports
        , empty
        , folddoc (\x y -> x </> empty </> y)
                  (map renderOpAndExtras $
                   sortOn (view name) $
                   filter (not . flip elem exclusions . view name) $
                   toList $ opList ^. op)
        ]
        -- Module name: prefix + CamelCased output file base name.
  where moduleName =
            Text.pack (prefix flags) <> "." <> camelCase
             -- Discards the optional trailing _ops_op_lib
            (fromMaybe shortName (Text.stripSuffix "_ops_op_lib" shortName))
        shortName = Text.pack (takeBaseName $ outputFile flags)
        exclusions = Text.splitOn "," $ Text.pack $ excludeList flags
        renderOpAndExtras o = renderOp (parseOp o) </> extras o
                                +
-- | The import lines emitted at the top of every generated module.
imports :: Doc
imports = stack [
      "import Data.ByteString (ByteString)"
    , "import Data.Complex (Complex)"
    , "import Data.Int (Int8, Int16, Int32, Int64)"
    , "import Data.Proxy (Proxy(Proxy))"
    , "import Data.Word (Word8, Word16)"
    , "import Lens.Family2 ((.~), (&))"
    , "import TensorFlow.Build"
    , "import TensorFlow.BuildOp"
    , "import TensorFlow.Tensor"
    , "import TensorFlow.Types"
    ]
                                +
-- | Render a 'Name' as its Haskell identifier, its TensorFlow name, or its
-- double-quoted TensorFlow name, respectively.
renderHaskellName, renderTFName, renderQuotedTFName :: Name -> Doc
renderHaskellName = strictText . unHaskellName . haskellName
renderTFName = strictText . unTFName . tfName
renderQuotedTFName = dquotes . renderTFName
                                +
                                +
-- | Generate the source code for a single op.
-- For example:
--
-- -- | {haddock comment}
-- foo :: {type sig}
-- foo attr1 attr2 input1 input2 | eqLengthGuard [...] = {function body}
renderOp :: ParsedOp -> Doc
renderOp pOp = stack $
    [ haddocks
    -- Prevent unreasonably long compilation times on ghc-7.10, due
    -- to stack calling "-dump-hi" which (unnecessarily) includes the
    -- inlining information, and is large for ops with many arguments.
#if __GLASGOW_HASKELL__ < 800
    , "{-# NOINLINE" <+> n <+> "#-}"
#endif
    , n <+> "::" <+> hang 0 (typeSig empty pOp)
    , n <+> "=" <+> n <> "' id"
    , n' <+> "::" <+> hang 0 (typeSig "OpParams ->" pOp)
    , n' <+> hang 0 args <+> "|" <+> funcGuard listSizeAttrs
                <+> "=" </>  -- args are indented
                    -- the body needs to be indented wrt the name
                    indent indentation (functionBody pOp)
    ] ++ whereClause listSizeAttrs
  where
    n = renderHaskellName $ parsedOpName pOp
    -- The primed variant additionally takes an explicit OpParams argument;
    -- the unprimed one delegates to it with `id`.
    n' = n <> "'"
    listSizeAttrs = inferredListSizeAttrs pOp
    -- op'options first, then explicit attributes, then input tensors.
    args = sep $ "op'options"
               : (map renderHaskellName
                    $ map attrName (explicitInputAttrs pOp)
                    ++ map parsedArgName (parsedInputs pOp))
    haddocks = "-- |" <+> multilineComment (parsedOpSummary pOp) (parsedOpDescription pOp)
                                +
-- | A check that all lists of the given size have the given length.
-- For example:
--   eqLengthGuard [("N", [("input1", length input1), ("input2", length input2)])]
funcGuard :: [Attr (NonEmpty Name)] -> Doc
funcGuard attrs = "eqLengthGuard" <+> brackets (commasep entries)
      where
        -- One entry per length attribute: its quoted TF name paired with
        -- the (name, length) pairs of the input lists it governs.
        entries =
            [ parens $ nAttr <> comma <+>
              brackets (commasep $ toList $
                            map renderTensorName (toList $ attrInfo a))
            | a <- attrs
            , let nAttr = renderQuotedTFName (attrName a)
            ]
        renderTensorName x = parens $ renderQuotedTFName x <> comma <+>
                        "length" <+> renderHaskellName x
                                +
-- | Define the implicit list length attributes.
-- For example:
--   where
--     n1 = fromIntegral (length input1) :: Int64
--     n2 = fromIntegral (length input2) :: Int64
whereClause :: [Attr (NonEmpty Name)] -> [Doc]
whereClause [] = []
whereClause as = [indent 2 $ "where" </> indent 2 (stack $ map defineLengthAttr as)]
  where
    -- Only the first governed list is measured; funcGuard has already
    -- checked that all of them have equal length.
    defineLengthAttr a = renderHaskellAttrName a <+> "="
                            <+> "fromIntegral (length"
                            <+> renderHaskellName (NE.head $ attrInfo a)
                            <> ") :: Int64"
                                +
                                +renderHaskellAttrName :: Attr a -> Doc
                                +renderHaskellAttrName = renderHaskellName . attrName
                                +
                                +functionBody :: ParsedOp -> Doc
                                +functionBody pOp
                                +    | parsedOpIsMonadic pOp
                                +        = "build $ do"
                                +            </> indent indentation (bindOpInputsVar
                                +                        </> "buildOp" <+> outputListsSizes <+> opDef)
                                +    | otherwise
                                +        = "pureOp" <+> outputListsSizes <+> "$ do"
                                +            </> indent indentation (bindOpInputsVar </> "return" <+> opDef)
                                +  where
                                +    outputListsSizes = brackets $ commasep
                                +        [ renderHaskellName a
                                +        | ParsedArg { parsedArgCase = ListArg { argLength = a } }
                                +            <- parsedOutputs pOp
                                +        ]
                                +    opInputsVar = "op'inputs"
                                +    bindOpInputsVar = opInputsVar <+> "<- fmap Prelude.concat $ Prelude.sequence"
                                +                            <+> brackets (commasep $ map (\a -> "buildInputs" <+> a) tensorArgs)
                                +    opDef = parens $ hang 0 $ stack $
                                +        "opDef" <+> renderQuotedTFName (parsedOpName pOp) :
                                +        -- Renders type parameter arguments.
                                +        [ "& opAttr" <+> renderQuotedTFName n <+> ".~" <+> inferredTypeExpr a
                                +        | a <- inferredTypeAttrs pOp, let n = attrName a
                                +        ] ++
                                +        -- Renders mandatory attributes as function parameters.
                                +        [ "& opAttr" <+> renderQuotedTFName n <+> ".~" <+> renderHaskellName n
                                +        | a <- explicitInputAttrs pOp, let n = attrName a
                                +        ] ++
                                +        -- Renders sizes of tensor list types having number_attr.
                                +        [ "& opAttr" <+> renderQuotedTFName n <+> ".~" <+> renderHaskellName n
                                +        | a <- inferredListSizeAttrs pOp, let n = attrName a
                                +        ] ++
                                +        ["& op'options & opInputs .~" <+> opInputsVar]
                                +    tensorArgs = renderTensorArg <$> parsedInputs pOp
                                +    renderTensorArg = renderHaskellName . parsedArgName
                                +    inferredTypeExpr a
                                +        | typeParamIsList $ attrInfo a
                                +            = "fromTensorTypes (Proxy :: Proxy" <+> renderHaskellAttrName a
                                +                    <> ")"
                                +        | otherwise = "tensorType (undefined ::" <+> renderHaskellAttrName a
                                +                            <> ")"
                                +
                                +-- | Write a comment with the inputs/outputs/attributes in proto format, for
                                +-- debugging.
                                +extras :: OpDef -> Doc
                                +extras d = enclose "{-\n" "\n-}" $
                                +            strictText $ Text.pack $
                                +            showMessage ((def :: OpDef)
                                +                        & inputArg .~ (d ^. inputArg)
                                +                        & outputArg .~ (d ^. outputArg)
                                +                        & attr .~ (d ^. attr))
                                +
                                +-- | The type signature for an op.
                                +-- Of the form:
                                +-- forall t1 t2 v1 v2 . (TensorType t1, TensorType t2)
                                +--      => {pre} Float -> Tensor t1 v1 -> Tensor t2 v2
                                +-- where "Float" is an explicit input attribute, "Tensor t1 v1" is an input, and
                                +-- "Tensor t2 v2" is an output.
                                +typeSig :: Doc -> ParsedOp -> Doc
                                +typeSig pre pOp = constraints
                                +            <+/> pre </> signatureFold (map attrInput (explicitInputAttrs pOp)
                                +                                ++ map tensorArgAndComment (parsedInputs pOp)
                                +                                ++ [outputs])
                                +  where
                                +    constraints
                                +        | null classConstraints = empty
                                +        | otherwise = "forall" <+> sep typeParams <+> "." <+> tuple classConstraints <+> "=>"
                                +    typeParams = [strictText v | k <- parsedInputs pOp ++ parsedOutputs pOp,
                                +                  ArgSomeTensor v <- [argKind $ parsedArgCase k]]
                                +                ++ [renderHaskellAttrName n | n <- inferredTypeAttrs pOp]
                                +                ++ if parsedOpIsMonadic pOp then ["m'"] else []
                                +    -- Use m' as the type parameter to avoid clashing with an attribute name.
                                +    monadConstraint
                                +        | parsedOpIsMonadic pOp = ["MonadBuild m'"]
                                +        | otherwise = []
                                +    classConstraints = monadConstraint ++ map tensorArgConstraint
                                +                                                    (inferredTypeAttrs pOp)
                                +    signatureFold = folddoc (\x y -> x </> "->" <+> y)
                                +    attrInput a = renderAttrType (attrInfo a) <+> hang 0 ("-- ^" <+> attrComment a)
                                +    renderAttrType (AttrSingle a) = renderAttrBaseType a
                                +    renderAttrType (AttrList a) = brackets $ renderAttrBaseType a
                                +    renderAttrBaseType = \case
                                +        AttrBytes -> "ByteString"
                                +        AttrInt64 -> "Data.Int.Int64"
                                +        AttrFloat -> "Float"
                                +        AttrBool -> "Bool"
                                +        AttrType -> "DataType"
                                +        AttrShape -> "Shape"
                                +        AttrTensor -> "TensorProto"
                                +
                                +    tensorArgAndComment t = tensorArg t <+> hang 0 ("-- ^" <+> argComment t)
                                +    outputs = case parsedOutputs pOp of
                                +        [] -> wrapOutput "ControlNode"
                                +        -- TODO(judahjacobson): To improve indentation: `tensorArgAndComment a`
                                +        [a] -> wrapOutput (tensorArg a) <+> "-- ^" <+> argComment a
                                +        as -> wrapOutput (tuple (map tensorArg as)) <+/> resultComment as
                                +    wrapOutput o
                                +        | parsedOpIsMonadic pOp = "m'" <+> parens o
                                +        | otherwise = o
                                +
                                +-- | Render an op input or output.
                                +-- For example: "Tensor Ref Int64", "Tensor v t"
                                +tensorArg :: ParsedArg -> Doc
                                +tensorArg p = case parsedArgCase p of
                                +    SimpleArg { argType = t, argKind = k } -> tensorType t k
                                +    ListArg { argType = t, argKind = k } -> brackets $ tensorType t k
                                +    MixedListArg {argTypeAttr = t, argKind = k}
                                +        -> "TensorList" <+> parens (kind k) <+> renderHaskellName t
                                +  where
                                +    kind k = case k of
                                +                ArgTensorRef -> "Ref"
                                +                ArgTensorValue -> "Value"
                                +                ArgTensorBuild -> "Build"
                                +                ArgSomeTensor v -> strictText v
                                +    tensorType t k = let
                                +        a = case t of
                                +                ArgTypeFixed dt -> strictText $ dtTypeToHaskell dt
                                +                ArgTypeAttr n -> renderHaskellName n
                                +        in "Tensor" <+> kind k <+> a
                                +
                                +attrComment :: Attr a -> Doc
                                +attrComment a = argComment' (attrName a) (attrDescription a)
                                +
                                +argComment :: ParsedArg -> Doc
                                +argComment a = argComment' (parsedArgName a) (parsedArgDescription a)
                                +
                                +argComment' :: Name -> Text.Text -> Doc
                                +argComment' argName argDesc =
                                +    bold (renderTFName argName) <> splitMultilineText (":" <+>) argDesc
                                +
                                +bold :: Doc -> Doc
                                +bold n = "__" <> n <> "__"
                                +
                                +-- | Comment for the outputs of an op.
                                +-- For example:
                                +--   -- ^ (__output1__, __output2__)
                                +--   --
                                +--   -- * __output1__: description1
                                +--   --
                                +--   -- * __output2__: description2
                                +resultComment :: [ParsedArg] -> Doc
                                +resultComment os = stack $ flatten commentSummary : map commentDetails os
                                +  where
                                +    commentSummary = "-- ^" <+> tuple [bold (renderTFName $ parsedArgName o) | o <- os]
                                +    commentDetails o =
                                +        stack [ "--"
                                +              , "-- *" <+> argComment o
                                +              ]
                                +
                                +-- | Constraints for a given type parameter.
                                +-- E.g.: "TensorType t" or "OneOf [Int64, Float] t"
                                +-- or "TensorTypes ts" or "OneOfs [..] ts".
                                +tensorArgConstraint :: Attr TypeParam -> Doc
                                +tensorArgConstraint a = case attrInfo a of
                                +    TypeParam False Nothing -> "TensorType" <+> n
                                +    TypeParam False (Just as) -> "OneOf" <+> typeList as <+> n
                                +    TypeParam True Nothing -> "TensorTypes" <+> n
                                +    TypeParam True (Just as) -> "OneOfs" <+> typeList as <+> n
                                +  where
                                +    n = renderHaskellAttrName a
                                +    -- Produces a type-level list, e.g.: '[Int32,Int64,Float]
                                +    typeList = ("'" <>) . brackets . commasep . map strictText .
                                +                    Set.toList . Set.fromList .
                                +                    map dtTypeToHaskell . toList
                                +
                                +-- NOTE: The cases of this function should be kept in sync with
                                +-- TensorFlow.Types.AllTensorTypes.
                                +dtTypeToHaskell :: DataType -> Text.Text
                                +dtTypeToHaskell DT_BOOL = "Bool"
                                +dtTypeToHaskell DT_BFLOAT16 = "Data.Word.Word16"
                                +dtTypeToHaskell DT_COMPLEX128 = "(Data.Complex.Complex Double)"
                                +dtTypeToHaskell DT_COMPLEX64 = "(Data.Complex.Complex Float)"
                                +dtTypeToHaskell DT_DOUBLE = "Double"
                                +dtTypeToHaskell DT_FLOAT = "Float"
                                +dtTypeToHaskell DT_INT16 = "Data.Int.Int16"
                                +dtTypeToHaskell DT_INT32 = "Data.Int.Int32"
                                +dtTypeToHaskell DT_INT64 = "Data.Int.Int64"
                                +dtTypeToHaskell DT_INT8 = "Data.Int.Int8"
                                +dtTypeToHaskell DT_QINT32 = "Data.Int.Int32"  -- TODO(gnezdo): make unique
                                +dtTypeToHaskell DT_QINT8 = "Data.Word.Word8"  -- TODO(gnezdo): make unique
                                +dtTypeToHaskell DT_QINT16 = "Data.Int.Int16"  -- TODO(gnezdo): make unique
                                +dtTypeToHaskell DT_QUINT16 = "Data.Word.Word16"  -- TODO(gnezdo): make unique
                                +dtTypeToHaskell DT_QUINT8 = "Data.Word.Word8"  -- TODO(gnezdo): make unique
                                +dtTypeToHaskell DT_STRING = "Data.ByteString.ByteString"
                                +dtTypeToHaskell DT_UINT16 = "Data.Word.Word16"
                                +dtTypeToHaskell DT_HALF = "Data.Word.Word16"  -- TODO(gnezdo): make unique
                                +dtTypeToHaskell DT_UINT8 = "Data.Word.Word8"
                                +dtTypeToHaskell DT_RESOURCE = "ResourceHandle"
                                +dtTypeToHaskell x =
                                +    Text.pack $ "Unsupported type in dtTypeToHaskell: " ++ show x
                                +
                                +-- | haddockComment escapes TensorFlow doc strings into haddock.
                                +-- TODO(gnezdo): deal with the markup.
                                +haddockComment :: Text.Text -> Doc
                                +haddockComment = strictText
                                +
                                +-- | Generate a multiline comment.  For example:
                                +--   summary'
                                +--   --
                                +--   -- detail_line1
                                +--   -- detail_line2
                                +--   -- ...
                                +multilineComment :: Text.Text -> Text.Text -> Doc
                                +multilineComment summary' detail =
                                +    haddockComment summary' </>
                                +    splitMultilineText insertParagraphAndComment detail
                                +  where insertParagraphAndComment x = "--" </> "--" <+> x
                                +
                                +-- | Converts the given multi-line detail string into
                                +-- a multi-line haddock. Applies the given lead to the
                                +-- first line. Returns an empty document for empty detail.
                                +splitMultilineText :: (Doc -> Doc) -> Text.Text -> Doc
                                +splitMultilineText lead detail =
                                +  case Text.lines detail of
                                +    [] -> empty
                                +    (l : ls) -> stack $ lead (haddockComment l)
                                +                      : map (("--" <+>) . haddockComment) ls
                                +
                                +indentation :: Int
                                +indentation = 4
                                +
                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-opgen-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/style.css b/docs/haddock/tensorflow-opgen-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt b/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt deleted file mode 100644 index 79cc37a..0000000 --- 
a/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt +++ /dev/null @@ -1,161 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | Code generation for TensorFlow operations. --- --- Please see README.md -@package tensorflow-opgen -@version 0.1.0.0 - - --- | This module helps parse the proto OpDef into a Haskell type which is --- more descriptive of how the attributes and arguments will be used in --- the generated code. -module TensorFlow.OpGen.ParsedOp -data ParsedOp -ParsedOp :: Name -> Text -> Text -> [ParsedArg] -> [ParsedArg] -> [Attr AttrType] -> [Attr TypeParam] -> [Attr (NonEmpty Name)] -> Bool -> ParsedOp -[parsedOpName] :: ParsedOp -> Name -[parsedOpSummary] :: ParsedOp -> Text -[parsedOpDescription] :: ParsedOp -> Text -[parsedInputs] :: ParsedOp -> [ParsedArg] -[parsedOutputs] :: ParsedOp -> [ParsedArg] - --- | Attributes that must be set explicitly when creating the op. --- Associated with the type of the attribute. -[explicitInputAttrs] :: ParsedOp -> [Attr AttrType] - --- | Attributes that are type parameters. -[inferredTypeAttrs] :: ParsedOp -> [Attr TypeParam] -[inferredListSizeAttrs] :: ParsedOp -> [Attr (NonEmpty Name)] - --- | Whether this op is stateful or takes a stateful input. Such ops should --- not be CSE'd and must be monadic in our API (i.e., return a Build --- action). -[parsedOpIsMonadic] :: ParsedOp -> Bool -data Name -Name :: HaskellName -> TFName -> Name -[haskellName] :: Name -> HaskellName -[tfName] :: Name -> TFName - --- | A name that's appropriate for a variable in a Haskell source file. -newtype HaskellName -HaskellName :: Text -> HaskellName -[unHaskellName] :: HaskellName -> Text - --- | A raw name as specified in the OpDef proto. -newtype TFName -TFName :: Text -> TFName -[unTFName] :: TFName -> Text - --- | A named attribute, associated with some information about it. 
-data Attr a -Attr :: Name -> Text -> a -> Attr a -[attrName] :: Attr a -> Name -[attrDescription] :: Attr a -> Text -[attrInfo] :: Attr a -> a - --- | The type of an attribute. -data AttrType -AttrSingle :: AttrBaseType -> AttrType -AttrList :: AttrBaseType -> AttrType -data AttrBaseType -AttrBytes :: AttrBaseType -AttrInt64 :: AttrBaseType -AttrFloat :: AttrBaseType -AttrBool :: AttrBaseType -AttrType :: AttrBaseType -AttrShape :: AttrBaseType -AttrTensor :: AttrBaseType -data TypeParam -TypeParam :: Bool -> Maybe (NonEmpty DataType) -> TypeParam -[typeParamIsList] :: TypeParam -> Bool - --- | The list of allowed types (see: TensorFlow.Types.OneOf). If --- Nothing, then any type is acceptable. -[typeParamRestrictions] :: TypeParam -> Maybe (NonEmpty DataType) - --- | An input or output argument (Tensor) for an op. -data ParsedArg -ParsedArg :: Name -> Text -> ParsedArgCase -> ParsedArg -[parsedArgName] :: ParsedArg -> Name -[parsedArgDescription] :: ParsedArg -> Text -[parsedArgCase] :: ParsedArg -> ParsedArgCase -data ParsedArgCase -SimpleArg :: ArgType -> ArgKind -> ParsedArgCase -[argType] :: ParsedArgCase -> ArgType -[argCaseKind] :: ParsedArgCase -> ArgKind -ListArg :: Name -> ArgType -> ArgKind -> ParsedArgCase - --- | The attribute that specifies this list's length. -[argLength] :: ParsedArgCase -> Name -[argType] :: ParsedArgCase -> ArgType -[argCaseKind] :: ParsedArgCase -> ArgKind - --- | A heterogeneous list. -MixedListArg :: Name -> ArgKind -> ParsedArgCase -[argTypeAttr] :: ParsedArgCase -> Name -[argCaseKind] :: ParsedArgCase -> ArgKind -ResourceArg :: ParsedArgCase - --- | The type of an argument. -data ArgType - --- | A fixed type. -ArgTypeFixed :: DataType -> ArgType - --- | A type that depends on an attribute. 
-ArgTypeAttr :: Name -> ArgType -data ArgKind -ArgTensorRef :: ArgKind -ArgTensorValue :: ArgKind -ArgTensorBuild :: ArgKind -ArgSomeTensor :: Text -> ArgKind -argKind :: ParsedArgCase -> Maybe ArgKind -parseOp :: OpDef -> ParsedOp -camelCase :: Text -> Text -instance GHC.Classes.Eq TensorFlow.OpGen.ParsedOp.ArgKind -instance GHC.Classes.Eq TensorFlow.OpGen.ParsedOp.AttrType -instance GHC.Classes.Eq TensorFlow.OpGen.ParsedOp.AttrBaseType -instance GHC.Classes.Ord TensorFlow.OpGen.ParsedOp.TFName -instance GHC.Classes.Eq TensorFlow.OpGen.ParsedOp.TFName - - --- | Rendering of TensorFlow operations as Haskell functions. --- --- The basic type signature generated for each op is: --- ---
                                ---   {constraints} => {mandatory attrs} -> {input tensors} -> {output tensors}
                                ---   
                                --- --- where: --- ---
                                  ---
                                • {mandatory attrs} is of the form A_1 -> ... -> --- A_N, where each A is an op attribute that doesn't have a --- default and can't be inferred from other inputs.
                                • ---
                                • {constraints} restrict the type parameters of the input --- and output tensors (for example: TensorType or --- OneOf).
                                • ---
                                • {input tensors} is of the form T_1 -> ... -> --- T_N, where each T is of the form Tensor Ref a, --- Tensor v a or ResourceHandle (or a list of one of --- those types), and a is either a concrete type or a --- (constrained) type variable.
                                • ---
                                • {output tensors} is of the form (T_1,...,T_N) --- for "pure" ops, and Build (T_1,...,T_N) for "stateful" ops. --- An op is considered "stateful" if it takes a Tensor Ref or --- ResourceHandle as input, or if it's explicitly marked --- "Stateful" in its REGISTER_OP definition. (If there are no --- outputs, it is either ControlNode or Build --- ControlNode.)
                                • ---
                                -module TensorFlow.OpGen -data OpGenFlags -OpGenFlags :: String -> String -> String -> OpGenFlags -[outputFile] :: OpGenFlags -> String -[prefix] :: OpGenFlags -> String -[excludeList] :: OpGenFlags -> String -docOpList :: OpGenFlags -> OpList -> Doc -flagParser :: Parser OpGenFlags diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/LICENSE b/docs/haddock/tensorflow-ops-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html index 1575c69..1ce3143 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html @@ -1,16 +1,16 @@ -TensorFlow.EmbeddingOps

                                tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.EmbeddingOps

                                Description

                                Parallel lookups on the list of tensors.

                                Synopsis

                                Documentation

                                embeddingLookup

                                Arguments

                                :: (MonadBuild m, Rendered v1, TensorType a, OneOf `[Int64, Int32]` b, Num b) 
                                => [Tensor v1 a]

                                A list of tensors which can be concatenated along - dimension 0. Each Tensor must be appropriately - sized for mod partition strategy.

                                -> Tensor v2 b

                                A Tensor with type int32 or int64 +

                                tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.EmbeddingOps

                                Description

                                Parallel lookups on the list of tensors.

                                Synopsis

                                Documentation

                                embeddingLookup Source #

                                Arguments

                                :: (MonadBuild m, Rendered (Tensor v1), TensorType a, OneOf '[Int64, Int32] b, Num b) 
                                => [Tensor v1 a]

                                A list of tensors which can be concatenated along + dimension 0. Each Tensor must be appropriately + sized for mod partition strategy.

                                -> Tensor v2 b

                                A Tensor with type int32 or int64 containing the ids to be looked up in params. The ids are required to have fewer than 2^31 - entries.

                                -> m (Tensor Value a)

                                A dense tensor with shape `shape(ids) + shape(params)[1:]`.

                                Looks up ids in a list of embedding tensors.

                                This function is used to perform parallel lookups on the list of + entries.

                                -> m (Tensor Value a)

                                A dense tensor with shape `shape(ids) + shape(params)[1:]`.

                                Looks up ids in a list of embedding tensors.

                                This function is used to perform parallel lookups on the list of tensors in params. It is a generalization of gather, where params is interpreted as a partition of a larger embedding tensor.

                                The partition_strategy is "mod", we assign each id to partition `p = id % len(params)`. For instance, 13 ids are split across 5 partitions as: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`

                                The results of the lookup are concatenated into a dense - tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.

                                \ No newline at end of file + tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html index 458ec9d..a028d75 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html @@ -1,4 +1,4 @@ -TensorFlow.Gradient

                                tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Gradient

                                Synopsis

                                Documentation

                                gradients

                                Arguments

                                :: (MonadBuild m, Rendered v2, GradientCompatible a) 
                                => Tensor v1 a

                                The output of the graph.

                                -> [Tensor v2 a]

                                Tensors for which gradients are computed.

                                -> m [Tensor Value a] 

                                Gradient of y w.r.t. each element of xs.

                                \ No newline at end of file +

                                tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Gradient

                                Synopsis

                                Documentation

                                gradients Source #

                                Arguments

                                :: (MonadBuild m, Rendered t, ToTensor t, GradientCompatible a) 
                                => Tensor v1 a

                                The output of the graph.

                                -> [t a]

                                Tensors for which gradients are computed.

                                -> m [Tensor Value a] 

                                Gradient of y w.r.t. each element of xs.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Minimize.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Minimize.html new file mode 100644 index 0000000..9e5630b --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Minimize.html @@ -0,0 +1,5 @@ +TensorFlow.Minimize

                                tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Minimize

                                Documentation

                                type Minimizer a = forall m. MonadBuild m => [Variable a] -> [Tensor Value a] -> m ControlNode Source #

                                Functions that minimize a loss w.r.t. a set of Variables.

                                Generally only performs one step of an iterative algorithm.

                                Minimizers are defined as a function of the gradients instead of + the loss so that users can apply transformations to the gradients.

                                minimizeWith Source #

                                Arguments

                                :: (MonadBuild m, GradientCompatible a) 
                                => Minimizer a 
                                -> Tensor v a

                                Loss.

                                -> [Variable a]

                                Parameters of the loss function.

                                -> m ControlNode 

                                Convenience wrapper around gradients and a Minimizer.

                                gradientDescent Source #

                                Arguments

                                :: GradientCompatible a 
                                => a

                                Learning rate.

                                -> Minimizer a 

                                Perform one step of the gradient descent algorithm.

                                data AdamConfig Source #

                                Instances

                                Default AdamConfig Source # 

                                Methods

                                def :: AdamConfig

                                adam :: Minimizer Float Source #

                                Perform one step of the adam algorithm.

                                See https://arxiv.org/abs/1412.6980.

                                NOTE: Currently requires all Variables to have an initializedValue.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-NN.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-NN.html new file mode 100644 index 0000000..9fb04f2 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-NN.html @@ -0,0 +1,15 @@ +TensorFlow.NN

                                tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.NN

                                Documentation

                                sigmoidCrossEntropyWithLogits Source #

                                Arguments

                                :: (MonadBuild m, OneOf '[Float, Double] a, TensorType a, Num a) 
                                => Tensor Value a

                                logits

                                -> Tensor Value a

                                targets

                                -> m (Tensor Value a) 

                                Computes sigmoid cross entropy given logits.

                                Measures the probability error in discrete classification tasks in which each + class is independent and not mutually exclusive. For instance, one could + perform multilabel classification where a picture can contain both an elephant + and a dog at the same time.

                                For brevity, let `x = logits`, `z = targets`. The logistic loss is

                                z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) + = z * -log(1 (1 + exp(-x))) + (1 - z) * -log(exp(-x) (1 + exp(-x))) + = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) + = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) + = (1 - z) * x + log(1 + exp(-x)) + = x - x * z + log(1 + exp(-x))

                                For x < 0, to avoid overflow in exp(-x), we reformulate the above

                                x - x * z + log(1 + exp(-x)) + = log(exp(x)) - x * z + log(1 + exp(-x)) + = - x * z + log(1 + exp(x))

                                Hence, to ensure stability and avoid overflow, the implementation uses this + equivalent formulation

                                max(x, 0) - x * z + log(1 + exp(-abs(x)))

                                logits and targets must have the same type and shape.

                                \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html index d34f268..097f207 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html @@ -1,9 +1,9 @@ -TensorFlow.Ops

                                tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Ops

                                Description

                                This module contains definitions for some built-in TensorFlow operations.

                                Note that certain, "stateful" ops like variable and assign return a - Build action (e.g., Build (Tensor Ref a) instead of a pure value; the - returned Tensors are always rendered in the current Build context. This +

                                tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                                Safe HaskellNone
                                LanguageHaskell2010

                                TensorFlow.Ops

                                Description

                                This module contains definitions for some built-in TensorFlow operations.

                                Note that certain, "stateful" ops like variable and assign return a + Build action (e.g., Build (Tensor Ref a) instead of a pure value; the + returned Tensors are always rendered in the current Build context. This approach helps us avoid problems with inlining or common subexpression elimination, by writing

                                do
                                     v <- variable []
                                @@ -16,45 +16,47 @@ in w * w

                                since the latter could be reasonably transformed by the compile v = variable [] w = assign v 3 w' = assign v 3 -in w * w'

                                Ops should return a Build action if their original OpDef marks them as +in w * w'

                                Ops should return a Build action if their original OpDef marks them as stateful, or if they take any Refs as input. (This mirrors the rules that - TensorFlow uses to avoid common subexpression elimination.)

                                Synopsis

                                Documentation

                                add

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t 
                                => Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                y

                                -> Tensor Build t

                                z

                                Returns x + y element-wise.

                                • NOTE*: Add supports broadcasting. AddN does not. More about broadcasting - here

                                add'

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                y

                                -> Tensor Build t

                                z

                                abs

                                Arguments

                                :: forall (v'1 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t 
                                => Tensor v'1 t

                                x

                                -> Tensor Build t

                                y

                                Computes the absolute value of a tensor.

                                Given a tensor x, this operation returns a tensor containing the absolute + TensorFlow uses to avoid common subexpression elimination.)

                                Synopsis

                                Documentation

                                add #

                                Arguments

                                :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t 
                                => Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                y

                                -> Tensor Build t

                                z

                                Returns x + y element-wise.

                                • NOTE*: Add supports broadcasting. AddN does not. More about broadcasting + here

                                add' #

                                Arguments

                                :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                y

                                -> Tensor Build t

                                z

                                abs #

                                Arguments

                                :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t 
                                => Tensor v'1 t

                                x

                                -> Tensor Build t

                                y

                                Computes the absolute value of a tensor.

                                Given a tensor x, this operation returns a tensor containing the absolute value of each element in x. For example, if x is an input element and y is - an output element, this operation computes \(y = |x|\).

                                abs'

                                Arguments

                                :: forall (v'1 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                x

                                -> Tensor Build t

                                y

                                addN

                                Arguments

                                :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
                                => [Tensor v'1 t]

                                inputs: Must all be the same size and shape.

                                -> Tensor Build t

                                sum

                                Add all input tensors element wise.

                                addN'

                                Arguments

                                :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
                                => OpParams 
                                -> [Tensor v'1 t]

                                inputs: Must all be the same size and shape.

                                -> Tensor Build t

                                sum

                                argMax

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                                => Tensor v'1 t

                                input

                                -> Tensor v'2 tidx

                                dimension: int32, 0 <= dimension < rank(input). Describes which dimension - of the input Tensor to reduce across. For vectors, use dimension = 0.

                                -> Tensor Build Int64

                                output

                                Returns the index with the largest value across dimensions of a tensor.

                                argMax'

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                                => OpParams 
                                -> Tensor v'1 t

                                input

                                -> Tensor v'2 tidx

                                dimension: int32, 0 <= dimension < rank(input). Describes which dimension - of the input Tensor to reduce across. For vectors, use dimension = 0.

                                -> Tensor Build Int64

                                output

                                assign

                                Arguments

                                :: forall (v'2 :: * -> *) (m' :: * -> *). (MonadBuild m', TensorType t) 
                                => Tensor Ref t

                                ref: Should be from a Variable node. May be uninitialized.

                                -> Tensor v'2 t

                                value: The value to be assigned to the variable.

                                -> m' (Tensor Ref t)

                                output_ref: = Same as "ref". Returned as a convenience for operations that want - to use the new value after the variable has been reset.

                                Update ref by assigning value to it.

                                This operation outputs "ref" after the assignment is done. - This makes it easier to chain operations that need to use the reset value.

                                assign'

                                Arguments

                                :: forall (v'2 :: * -> *) (m' :: * -> *). (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Tensor Ref t

                                ref: Should be from a Variable node. May be uninitialized.

                                -> Tensor v'2 t

                                value: The value to be assigned to the variable.

                                -> m' (Tensor Ref t)

                                output_ref: = Same as "ref". Returned as a convenience for operations that want - to use the new value after the variable has been reset.

                                broadcastGradientArgs

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ([] *))) t 
                                => Tensor v'1 t

                                s0

                                -> Tensor v'2 t

                                s1

                                -> (Tensor Build t, Tensor Build t)

                                (r0, r1)

                                • r0
                                • r1

                                Return the reduction indices for computing gradients of s0 op s1 with broadcast.

                                This is typically used by gradient computations for a broadcasting operation.

                                broadcastGradientArgs'

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ([] *))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                s0

                                -> Tensor v'2 t

                                s1

                                -> (Tensor Build t, Tensor Build t)

                                (r0, r1)

                                • r0
                                • r1

                                cast

                                Arguments

                                :: forall (v'1 :: * -> *). (TensorType srcT, TensorType dstT) 
                                => Tensor v'1 srcT

                                x

                                -> Tensor Build dstT

                                y

                                Cast x of type SrcT to y of DstT.

                                cast'

                                Arguments

                                :: forall (v'1 :: * -> *). (TensorType srcT, TensorType dstT) 
                                => OpParams 
                                -> Tensor v'1 srcT

                                x

                                -> Tensor Build dstT

                                y

                                concat

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). TensorType t 
                                => Tensor v'1 Int32

                                concat_dim: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

                                -> [Tensor v'2 t]

                                values: The N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

                                -> Tensor Build t

                                output: A Tensor with the concatenation of values stacked along the + an output element, this operation computes \(y = |x|\).

                                abs' #

                                Arguments

                                :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                x

                                -> Tensor Build t

                                y

                                addN #

                                Arguments

                                :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
                                => [Tensor v'1 t]

                                inputs: Must all be the same size and shape.

                                -> Tensor Build t

                                sum

                                Add all input tensors element wise.

                                addN' #

                                Arguments

                                :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
                                => OpParams 
                                -> [Tensor v'1 t]

                                inputs: Must all be the same size and shape.

                                -> Tensor Build t

                                sum

                                argMax #

                                Arguments

                                :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) output_type) 
                                => Tensor v'1 t

                                input

                                -> Tensor v'2 tidx

                                dimension: int32 or int64, 0 <= dimension < rank(input). Describes + which dimension of the input Tensor to reduce across. For vectors, + use dimension = 0.

                                -> Tensor Build output_type

                                output

                                Returns the index with the largest value across dimensions of a tensor.

                                Note that in case of ties the identity of the return value is not guaranteed.

                                argMax' #

                                Arguments

                                :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) output_type) 
                                => OpParams 
                                -> Tensor v'1 t

                                input

                                -> Tensor v'2 tidx

                                dimension: int32 or int64, 0 <= dimension < rank(input). Describes + which dimension of the input Tensor to reduce across. For vectors, + use dimension = 0.

                                -> Tensor Build output_type

                                output

                                assign #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => Tensor Ref t

                                ref: Should be from a Variable node. May be uninitialized.

                                -> Tensor v'2 t

                                value: The value to be assigned to the variable.

                                -> m' (Tensor Ref t)

                                output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been reset.

                                Update ref by assigning value to it.

                                This operation outputs "ref" after the assignment is done. + This makes it easier to chain operations that need to use the reset value.

                                assign' #

                                Arguments

                                :: (MonadBuild m', TensorType t) 
                                => OpParams 
                                -> Tensor Ref t

                                ref: Should be from a Variable node. May be uninitialized.

                                -> Tensor v'2 t

                                value: The value to be assigned to the variable.

                                -> m' (Tensor Ref t)

                                output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been reset.

                                broadcastGradientArgs #

                                Arguments

                                :: OneOf ((:) * Int32 ((:) * Int64 ([] *))) t 
                                => Tensor v'1 t

                                s0

                                -> Tensor v'2 t

                                s1

                                -> (Tensor Build t, Tensor Build t)

                                (r0, r1)

                                • r0
                                • r1

                                Return the reduction indices for computing gradients of s0 op s1 with broadcast.

                                This is typically used by gradient computations for a broadcasting operation.

                                broadcastGradientArgs' #

                                Arguments

                                :: OneOf ((:) * Int32 ((:) * Int64 ([] *))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                s0

                                -> Tensor v'2 t

                                s1

                                -> (Tensor Build t, Tensor Build t)

                                (r0, r1)

                                • r0
                                • r1

                                cast #

                                Arguments

                                :: (TensorType srcT, TensorType dstT) 
                                => Tensor v'1 srcT

                                x

                                -> Tensor Build dstT

                                y

                                Cast x of type SrcT to y of DstT.

                                cast' #

                                Arguments

                                :: (TensorType srcT, TensorType dstT) 
                                => OpParams 
                                -> Tensor v'1 srcT

                                x

                                -> Tensor Build dstT

                                y

                                concat #

                                Arguments

                                :: TensorType t 
                                => Tensor v'1 Int32

                                concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

                                -> [Tensor v'2 t]

                                values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

                                -> Tensor Build t

                                output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

                                Concatenates tensors along one dimension.

                                concat'

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). TensorType t 
                                => OpParams 
                                -> Tensor v'1 Int32

                                concat_dim: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

                                -> [Tensor v'2 t]

                                values: The N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

                                -> Tensor Build t

                                output: A Tensor with the concatenation of values stacked along the + in concat_dim where it has the sum of the sizes.

                                Concatenates tensors along one dimension.

                                concat' #

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Tensor v'1 Int32

                                concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

                                -> [Tensor v'2 t]

                                values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

                                -> Tensor Build t

                                output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

                                constant :: TensorType a => Shape -> [a] -> Tensor Build a

                                Create a constant tensor.

                                The values should be in row major order, e.g.,

                                element 0: index (0, ..., 0) + in concat_dim where it has the sum of the sizes.

                                constant :: TensorType a => Shape -> [a] -> Tensor Build a Source #

                                Create a constant tensor.

                                The values should be in row major order, e.g.,

                                element 0: index (0, ..., 0) element 1: index (0, ..., 1) - ...

                                constant' :: forall a. TensorType a => OpParams -> Shape -> [a] -> Tensor Build a

                                equal

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t 
                                => Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                y

                                -> Tensor Build Bool

                                z

                                Returns the truth value of (x == y) element-wise.

                                • NOTE*: Equal supports broadcasting. More about broadcasting - here

                                equal'

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                y

                                -> Tensor Build Bool

                                z

                                initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Tensor Ref a)

                                Creates a variable initialized to the given value. - Initialization happens next time session runs.

                                zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Tensor Ref a)

                                Creates a zero-initialized variable with the given shape.

                                fill

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). TensorType t 
                                => Tensor v'1 Int32

                                dims: 1-D. Represents the shape of the output tensor.

                                -> Tensor v'2 t

                                value: 0-D (scalar). Value to fill the returned tensor.

                                compatibility(numpy) + ...

                                constant' :: forall a. TensorType a => OpParams -> Shape -> [a] -> Tensor Build a Source #

                                equal #

                                Arguments

                                :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t 
                                => Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                y

                                -> Tensor Build Bool

                                z

                                Returns the truth value of (x == y) element-wise.

                                • NOTE*: Equal supports broadcasting. More about broadcasting + here

                                equal' #

                                Arguments

                                :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                x

                                -> Tensor v'2 t

                                y

                                -> Tensor Build Bool

                                z

                                initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Tensor Ref a) Source #

                                Creates a variable initialized to the given value. + Initialization happens next time session runs.

                                zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Tensor Ref a) Source #

                                Creates a zero-initialized variable with the given shape.

                                fill #

                                Arguments

                                :: TensorType t 
                                => Tensor v'1 Int32

                                dims: 1-D. Represents the shape of the output tensor.

                                -> Tensor v'2 t

                                value: 0-D (scalar). Value to fill the returned tensor.

                                compatibility(numpy) Equivalent to np.full - end_compatibility

                                -> Tensor Build t

                                output

                                Creates a tensor filled with a scalar value.

                                This operation creates a tensor of shape dims and fills it with value.

                                For example:

                                ```prettyprint + end_compatibility

                                -> Tensor Build t

                                output

                                Creates a tensor filled with a scalar value.

                                This operation creates a tensor of shape dims and fills it with value.

                                For example:

                                ``` # Output tensor has shape [2, 3]. fill([2, 3], 9) ==> [[9, 9, 9] [9, 9, 9]] - ```

                                fill'

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). TensorType t 
                                => OpParams 
                                -> Tensor v'1 Int32

                                dims: 1-D. Represents the shape of the output tensor.

                                -> Tensor v'2 t

                                value: 0-D (scalar). Value to fill the returned tensor.

                                compatibility(numpy) + ```

                                fill' #

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Tensor v'1 Int32

                                dims: 1-D. Represents the shape of the output tensor.

                                -> Tensor v'2 t

                                value: 0-D (scalar). Value to fill the returned tensor.

                                compatibility(numpy) Equivalent to np.full - end_compatibility

                                -> Tensor Build t

                                output

                                identity

                                Arguments

                                :: forall (v'1 :: * -> *). TensorType t 
                                => Tensor v'1 t

                                input

                                -> Tensor Build t

                                output

                                Return a tensor with the same shape and contents as the input tensor or value.

                                identity'

                                Arguments

                                :: forall (v'1 :: * -> *). TensorType t 
                                => OpParams 
                                -> Tensor v'1 t

                                input

                                -> Tensor Build t

                                output

                                matMul

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t 
                                => Tensor v'1 t

                                a

                                -> Tensor v'2 t

                                b

                                -> Tensor Build t

                                product

                                Multiply the matrix "a" by the matrix "b".

                                The inputs must be two-dimensional matrices and the inner dimension of + end_compatibility

                                -> Tensor Build t

                                output

                                identity #

                                Arguments

                                :: TensorType t 
                                => Tensor v'1 t

                                input

                                -> Tensor Build t

                                output

                                Return a tensor with the same shape and contents as the input tensor or value.

                                identity' #

                                Arguments

                                :: TensorType t 
                                => OpParams 
                                -> Tensor v'1 t

                                input

                                -> Tensor Build t

                                output

                                matMul #

                                Arguments

                                :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t 
                                => Tensor v'1 t

                                a

                                -> Tensor v'2 t

                                b

                                -> Tensor Build t

                                product

                                Multiply the matrix "a" by the matrix "b".

                                The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transposed_b is true).

                                • Note*: The default kernel implementation for MatMul on GPUs uses - cublas.

                                matMul'

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t 
                                => OpParams 
                                -> Tensor v'1 t

                                a

                                -> Tensor v'2 t

                                b

                                -> Tensor Build t

                                product

                                mean

                                Arguments

                                :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                                => Tensor v'1 t

                                input: The tensor to reduce.

                                -> Tensor v'2 tidx

                                reduction_indices: The dimensions to reduce.

                                -> Tensor Build t

                                output: The reduced tensor.

                                Computes the mean of elements across dimensions of a tensor.

                                Reduces input along the dimensions given in reduction_indices. Unless + cublas.

                              matMul' #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              a

                              -> Tensor v'2 t

                              b

                              -> Tensor Build t

                              product

                              mean #

                              Arguments

                              :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                              => Tensor v'1 t

                              input: The tensor to reduce.

                              -> Tensor v'2 tidx

                              reduction_indices: The dimensions to reduce.

                              -> Tensor Build t

                              output: The reduced tensor.

                              Computes the mean of elements across dimensions of a tensor.

                              Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

                              mean'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                              => OpParams 
                              -> Tensor v'1 t

                              input: The tensor to reduce.

                              -> Tensor v'2 tidx

                              reduction_indices: The dimensions to reduce.

                              -> Tensor Build t

                              output: The reduced tensor.

                              mul

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              Returns x * y element-wise.

                              • NOTE*: Mul supports broadcasting. More about broadcasting - here

                              mul'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              neg

                              Arguments

                              :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => Tensor v'1 t

                              x

                              -> Tensor Build t

                              y

                              Computes numerical negative value element-wise.

                              I.e., \(y = -x\).

                              neg'

                              Arguments

                              :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor Build t

                              y

                              oneHot

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *) (v'4 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) 
                              => Tensor v'1 tI

                              indices: A tensor of indices.

                              -> Tensor v'2 Int32

                              depth: A scalar defining the depth of the one hot dimension.

                              -> Tensor v'3 t

                              on_value: A scalar defining the value to fill in output when `indices[j] = i`.

                              -> Tensor v'4 t

                              off_value: A scalar defining the value to fill in output when `indices[j] != i`.

                              -> Tensor Build t

                              output: The one-hot tensor.

                              Returns a one-hot tensor.

                              The locations represented by indices in indices take value on_value, + retained with length 1.

                              mean' #

                              Arguments

                              :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                              => OpParams 
                              -> Tensor v'1 t

                              input: The tensor to reduce.

                              -> Tensor v'2 tidx

                              reduction_indices: The dimensions to reduce.

                              -> Tensor Build t

                              output: The reduced tensor.

                              mul #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              Returns x * y element-wise.

                              • NOTE*: Mul supports broadcasting. More about broadcasting + here

                              mul' #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              neg #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => Tensor v'1 t

                              x

                              -> Tensor Build t

                              y

                              Computes numerical negative value element-wise.

                              I.e., \(y = -x\).

                              neg' #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor Build t

                              y

                              oneHot #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) 
                              => Tensor v'1 tI

                              indices: A tensor of indices.

                              -> Tensor v'2 Int32

                              depth: A scalar defining the depth of the one hot dimension.

                              -> Tensor v'3 t

                              on_value: A scalar defining the value to fill in output when `indices[j] = i`.

                              -> Tensor v'4 t

                              off_value: A scalar defining the value to fill in output when `indices[j] != i`.

                              -> Tensor Build t

                              output: The one-hot tensor.

                              Returns a one-hot tensor.

                              The locations represented by indices in indices take value on_value, while all other locations take value off_value.

                              If the input indices is rank N, the output will have rank `N+1`, The new axis is created at dimension axis (default: the new axis is appended at the end).

                              If indices is a scalar the output shape will be a vector of length depth.

                              If indices is a vector of length features, the output shape will be: @@ -108,30 +110,31 @@ in w * w'

                              Ops should return a

                              oneHot'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *) (v'4 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) 
                              => OpParams 
                              -> Tensor v'1 tI

                              indices: A tensor of indices.

                              -> Tensor v'2 Int32

                              depth: A scalar defining the depth of the one hot dimension.

                              -> Tensor v'3 t

                              on_value: A scalar defining the value to fill in output when `indices[j] = i`.

                              -> Tensor v'4 t

                              off_value: A scalar defining the value to fill in output when `indices[j] != i`.

                              -> Tensor Build t

                              output: The one-hot tensor.

                              pack

                              Arguments

                              :: forall (v'1 :: * -> *). TensorType t 
                              => [Tensor v'1 t]

                              values: Must be of same shape and type.

                              -> Tensor Build t

                              output: The packed tensor.

                              Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

                              Packs the N tensors in values into a tensor with rank one higher than each + ]```

                              oneHot' #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) 
                              => OpParams 
                              -> Tensor v'1 tI

                              indices: A tensor of indices.

                              -> Tensor v'2 Int32

                              depth: A scalar defining the depth of the one hot dimension.

                              -> Tensor v'3 t

                              on_value: A scalar defining the value to fill in output when `indices[j] = i`.

                              -> Tensor v'4 t

                              off_value: A scalar defining the value to fill in output when `indices[j] != i`.

                              -> Tensor Build t

                              output: The one-hot tensor.

                              pack #

                              Arguments

                              :: TensorType t 
                              => [Tensor v'1 t]

                              values: Must be of same shape and type.

                              -> Tensor Build t

                              output: The packed tensor.

                              Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

                              Packs the N tensors in values into a tensor with rank one higher than each tensor in values, by packing them along the axis dimension. Given a list of tensors of shape `(A, B, C)`;

                              if `axis == 0` then the output tensor will have the shape `(N, A, B, C)`. if `axis == 1` then the output tensor will have the shape `(A, N, B, C)`. - Etc.

                              For example:

                              ```prettyprint + Etc.

                              For example:

                              ``` # x is [1, 4] # y is [2, 5] # z is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] - ```

                              This is the opposite of unpack.

                              pack'

                              Arguments

                              :: forall (v'1 :: * -> *). TensorType t 
                              => OpParams 
                              -> [Tensor v'1 t]

                              values: Must be of same shape and type.

                              -> Tensor Build t

                              output: The packed tensor.

                              placeholder' :: forall m a. (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Tensor Value a)

                              range

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx 
                              => Tensor v'1 tidx

                              start: 0-D (scalar). First entry in the sequence.

                              -> Tensor v'2 tidx

                              limit: 0-D (scalar). Upper limit of sequence, exclusive.

                              -> Tensor v'3 tidx

                              delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

                              -> Tensor Build tidx

                              output: 1-D.

                              Creates a sequence of numbers.

                              This operation creates a sequence of numbers that begins at start and + ```

                              This is the opposite of unpack.

                              pack' #

                              Arguments

                              :: TensorType t 
                              => OpParams 
                              -> [Tensor v'1 t]

                              values: Must be of same shape and type.

                              -> Tensor Build t

                              output: The packed tensor.

                              placeholder' :: forall m a. (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Tensor Value a) Source #

                              range #

                              Arguments

                              :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx 
                              => Tensor v'1 tidx

                              start: 0-D (scalar). First entry in the sequence.

                              -> Tensor v'2 tidx

                              limit: 0-D (scalar). Upper limit of sequence, exclusive.

                              -> Tensor v'3 tidx

                              delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

                              -> Tensor Build tidx

                              output: 1-D.

                              Creates a sequence of numbers.

                              This operation creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit.

                              For example:

                              ``` # start is 3 # limit is 18 # delta is 3 tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] - ```

                              range'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx 
                              => OpParams 
                              -> Tensor v'1 tidx

                              start: 0-D (scalar). First entry in the sequence.

                              -> Tensor v'2 tidx

                              limit: 0-D (scalar). Upper limit of sequence, exclusive.

                              -> Tensor v'3 tidx

                              delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

                              -> Tensor Build tidx

                              output: 1-D.

                              reducedShape :: (OneOf `[Int32, Int64]` t1, OneOf `[Int32, Int64]` t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Build Int32

                              Helper function for reduction ops (translation of math_ops.reduced_shape).

                              relu

                              Arguments

                              :: forall (v'1 :: * -> *). OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
                              => Tensor v'1 t

                              features

                              -> Tensor Build t

                              activations

                              Computes rectified linear: `max(features, 0)`.

                              relu'

                              Arguments

                              :: forall (v'1 :: * -> *). OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              features

                              -> Tensor Build t

                              activations

                              reluGrad

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
                              => Tensor v'1 t

                              gradients: The backpropagated gradients to the corresponding Relu operation.

                              -> Tensor v'2 t

                              features: The features passed as input to the corresponding Relu operation, OR - the outputs of that operation (both work equivalently).

                              -> Tensor Build t

                              backprops: `gradients * (features > 0)`.

                              Computes rectified linear gradients for a Relu operation.

                              reluGrad'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              gradients: The backpropagated gradients to the corresponding Relu operation.

                              -> Tensor v'2 t

                              features: The features passed as input to the corresponding Relu operation, OR - the outputs of that operation (both work equivalently).

                              -> Tensor Build t

                              backprops: `gradients * (features > 0)`.

                              reshape

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) 
                              => Tensor v'1 t

                              tensor

                              -> Tensor v'2 tshape

                              shape: Defines the shape of the output tensor.

                              -> Tensor Build t

                              output

                              Reshapes a tensor.

                              Given tensor, this operation returns a tensor that has the same values + ```

                              range' #

                              Arguments

                              :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx 
                              => OpParams 
                              -> Tensor v'1 tidx

                              start: 0-D (scalar). First entry in the sequence.

                              -> Tensor v'2 tidx

                              limit: 0-D (scalar). Upper limit of sequence, exclusive.

                              -> Tensor v'3 tidx

                              delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

                              -> Tensor Build tidx

                              output: 1-D.

                              reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Build Int32 Source #

                              Helper function for reduction ops (translation of math_ops.reduced_shape).

                              reduceMean :: (TensorType a, OneOf '[Double, Float, Complex Float, Complex Double] a) => Tensor v a -> Tensor Build a Source #

                              Computes the mean of elements across dimensions of a tensor. + See mean

                              relu #

                              Arguments

                              :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
                              => Tensor v'1 t

                              features

                              -> Tensor Build t

                              activations

                              Computes rectified linear: `max(features, 0)`.

                              relu' #

                              Arguments

                              :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              features

                              -> Tensor Build t

                              activations

                              reluGrad #

                              Arguments

                              :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
                              => Tensor v'1 t

                              gradients: The backpropagated gradients to the corresponding Relu operation.

                              -> Tensor v'2 t

                              features: The features passed as input to the corresponding Relu operation, OR + the outputs of that operation (both work equivalently).

                              -> Tensor Build t

                              backprops: `gradients * (features > 0)`.

                              Computes rectified linear gradients for a Relu operation.

                              reluGrad' #

                              Arguments

                              :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              gradients: The backpropagated gradients to the corresponding Relu operation.

                              -> Tensor v'2 t

                              features: The features passed as input to the corresponding Relu operation, OR + the outputs of that operation (both work equivalently).

                              -> Tensor Build t

                              backprops: `gradients * (features > 0)`.

                              reshape #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) 
                              => Tensor v'1 t

                              tensor

                              -> Tensor v'2 tshape

                              shape: Defines the shape of the output tensor.

                              -> Tensor Build t

                              output

                              Reshapes a tensor.

                              Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.

                              If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of `[-1]` flattens into 1-D. At most one component of shape can be -1.

                              If shape is 1-D or higher, then the operation returns a tensor with shape shape filled with the values of tensor. In this case, the number of elements - implied by shape must be the same as the number of elements in tensor.

                              For example:

                              ```prettyprint + implied by shape must be the same as the number of elements in tensor.

                              For example:

                              ``` # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], @@ -163,19 +166,19 @@ in w * w'

                              Ops should return a

                              reshape'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) 
                              => OpParams 
                              -> Tensor v'1 t

                              tensor

                              -> Tensor v'2 tshape

                              shape: Defines the shape of the output tensor.

                              -> Tensor Build t

                              output

                              restore

                              Arguments

                              :: (MonadBuild m, TensorType a) 
                              => ByteString

                              File path.

                              -> Tensor Ref a

                              Tensor to restore.

                              -> m ControlNode 

                              Restore a tensor's value from a checkpoint file.

                              restoreFromName

                              Arguments

                              :: (MonadBuild m, TensorType a) 
                              => ByteString

                              File path.

                              -> ByteString

                              Tensor name override.

                              -> Tensor Ref a

                              Tensor to restore.

                              -> m ControlNode 

                              Restore a tensor's value from a checkpoint file.

                              This version allows restoring from a checkpoint file that uses a different - tensor name than the variable.

                              save

                              Arguments

                              :: (Rendered v, MonadBuild m, TensorType a) 
                              => ByteString

                              File path.

                              -> [Tensor v a]

                              Tensors to save.

                              -> m ControlNode 

                              scalar :: TensorType a => a -> Tensor Build a

                              Create a constant scalar.

                              sign

                              Arguments

                              :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => Tensor v'1 t

                              x

                              -> Tensor Build t

                              y

                              Returns an element-wise indication of the sign of a number.

                              `y = sign(x) = -1` if `x 0 if `x == 0`; 1 if `x 0`.

                              For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

                              sign'

                              Arguments

                              :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor Build t

                              y

                              size

                              Arguments

                              :: forall (v'1 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) 
                              => Tensor v'1 t

                              input

                              -> Tensor Build out_type

                              output

                              Returns the size of a tensor.

                              This operation returns an integer representing the number of elements in - input.

                              For example:

                              ```prettyprint + ```

                              reshape' #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) 
                              => OpParams 
                              -> Tensor v'1 t

                              tensor

                              -> Tensor v'2 tshape

                              shape: Defines the shape of the output tensor.

                              -> Tensor Build t

                              output

                              restore Source #

                              Arguments

                              :: (MonadBuild m, TensorType a) 
                              => ByteString

                              File path.

                              -> Tensor Ref a

                              Tensor to restore.

                              -> m ControlNode 

                              Restore a tensor's value from a checkpoint file.

                              restoreFromName Source #

                              Arguments

                              :: (MonadBuild m, TensorType a) 
                              => ByteString

                              File path.

                              -> ByteString

                              Tensor name override.

                              -> Tensor Ref a

                              Tensor to restore.

                              -> m ControlNode 

                              Restore a tensor's value from a checkpoint file.

                              This version allows restoring from a checkpoint file that uses a different + tensor name than the variable.

                              save Source #

                              Arguments

                              :: (Rendered (Tensor v), MonadBuild m, TensorType a) 
                              => ByteString

                              File path.

                              -> [Tensor v a]

                              Tensors to save.

                              -> m ControlNode 

                              scalar :: TensorType a => a -> Tensor Build a Source #

                              Create a constant scalar.

                              sign #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => Tensor v'1 t

                              x

                              -> Tensor Build t

                              y

                              Returns an element-wise indication of the sign of a number.

                              `y = sign(x) = -1` if `x 0 if `x == 0`; 1 if `x 0`.

                              For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

                              sign' #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor Build t

                              y

                              size #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) 
                              => Tensor v'1 t

                              input

                              -> Tensor Build out_type

                              output

                              Returns the size of a tensor.

                              This operation returns an integer representing the number of elements in + input.

                              For example:

                              ``` # t is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] size(t) ==> 12 - ```

                              size'

                              Arguments

                              :: forall (v'1 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) 
                              => OpParams 
                              -> Tensor v'1 t

                              input

                              -> Tensor Build out_type

                              output

                              softmax

                              Arguments

                              :: forall (v'1 :: * -> *). OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
                              => Tensor v'1 t

                              logits: 2-D with shape `[batch_size, num_classes]`.

                              -> Tensor Build t

                              softmax: Same shape as logits.

                              Computes softmax activations.

                              For each batch i and class j we have

                              softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))

                              softmax'

                              Arguments

                              :: forall (v'1 :: * -> *). OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              logits: 2-D with shape `[batch_size, num_classes]`.

                              -> Tensor Build t

                              softmax: Same shape as logits.

                              softmaxCrossEntropyWithLogits

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
                              => Tensor v'1 t

                              features: batch_size x num_classes matrix

                              -> Tensor v'2 t

                              labels: batch_size x num_classes matrix + ```

                              size' #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) 
                              => OpParams 
                              -> Tensor v'1 t

                              input

                              -> Tensor Build out_type

                              output

                              softmax #

                              Arguments

                              :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
                              => Tensor v'1 t

                              logits: 2-D with shape `[batch_size, num_classes]`.

                              -> Tensor Build t

                              softmax: Same shape as logits.

                              Computes softmax activations.

                              For each batch i and class j we have

                              softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))

                              softmax' #

                              Arguments

                              :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              logits: 2-D with shape `[batch_size, num_classes]`.

                              -> Tensor Build t

                              softmax: Same shape as logits.

                              softmaxCrossEntropyWithLogits #

                              Arguments

                              :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
                              => Tensor v'1 t

                              features: batch_size x num_classes matrix

                              -> Tensor v'2 t

                              labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid - probability distribution.

                              -> (Tensor Build t, Tensor Build t)

                              (loss, backprop)

                              • loss: Per example loss (batch_size vector).
                              • backprop: backpropagated gradients (batch_size x num_classes matrix).

                              Computes softmax cross entropy cost and gradients to backpropagate.

                              Inputs are the logits, not probabilities.

                              softmaxCrossEntropyWithLogits'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              features: batch_size x num_classes matrix

                              -> Tensor v'2 t

                              labels: batch_size x num_classes matrix + probability distribution.

                              -> (Tensor Build t, Tensor Build t)

                              (loss, backprop)

                              • loss: Per example loss (batch_size vector).
                              • backprop: backpropagated gradients (batch_size x num_classes matrix).

                              Computes softmax cross entropy cost and gradients to backpropagate.

                              Inputs are the logits, not probabilities.

                              softmaxCrossEntropyWithLogits' #

                              Arguments

                              :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              features: batch_size x num_classes matrix

                              -> Tensor v'2 t

                              labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid - probability distribution.

                              -> (Tensor Build t, Tensor Build t)

                              (loss, backprop)

                              • loss: Per example loss (batch_size vector).
                              • backprop: backpropagated gradients (batch_size x num_classes matrix).

                              sparseToDense

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *) (v'4 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) 
                              => Tensor v'1 tindices

                              sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete - index where `sparse_values[i]` will be placed.

                              -> Tensor v'2 tindices

                              output_shape: 1-D. Shape of the dense output tensor.

                              -> Tensor v'3 t

                              sparse_values: 1-D. Values corresponding to each row of sparse_indices, - or a scalar value to be used for all sparse indices.

                              -> Tensor v'4 t

                              default_value: Scalar value to set for indices not specified in - sparse_indices.

                              -> Tensor Build t

                              dense: Dense output tensor of shape output_shape.

                              Converts a sparse representation into a dense tensor.

                              Builds an array dense with shape output_shape such that

                              ```prettyprint + probability distribution.

                              -> (Tensor Build t, Tensor Build t)

                              (loss, backprop)

                              • loss: Per example loss (batch_size vector).
                              • backprop: backpropagated gradients (batch_size x num_classes matrix).

                              sparseToDense #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) 
                              => Tensor v'1 tindices

                              sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete + index where `sparse_values[i]` will be placed.

                              -> Tensor v'2 tindices

                              output_shape: 1-D. Shape of the dense output tensor.

                              -> Tensor v'3 t

                              sparse_values: 1-D. Values corresponding to each row of sparse_indices, + or a scalar value to be used for all sparse indices.

                              -> Tensor v'4 t

                              default_value: Scalar value to set for indices not specified in + sparse_indices.

                              -> Tensor Build t

                              dense: Dense output tensor of shape output_shape.

                              Converts a sparse representation into a dense tensor.

                              Builds an array dense with shape output_shape such that

                              ``` # If sparse_indices is scalar dense[i] = (i == sparse_indices ? sparse_values : default_value)

                              # If sparse_indices is a vector, then for each i dense[sparse_indices[i]] = sparse_values[i]

                              # If sparse_indices is an n by d matrix, then for each i in [0, n) @@ -183,12 +186,16 @@ in w * w'

                              Ops should return a

                              sparseToDense'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *) (v'4 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) 
                              => OpParams 
                              -> Tensor v'1 tindices

                              sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete - index where `sparse_values[i]` will be placed.

                              -> Tensor v'2 tindices

                              output_shape: 1-D. Shape of the dense output tensor.

                              -> Tensor v'3 t

                              sparse_values: 1-D. Values corresponding to each row of sparse_indices, - or a scalar value to be used for all sparse indices.

                              -> Tensor v'4 t

                              default_value: Scalar value to set for indices not specified in - sparse_indices.

                              -> Tensor Build t

                              dense: Dense output tensor of shape output_shape.

                              sub

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              Returns x - y element-wise.

                              • NOTE*: Sub supports broadcasting. More about broadcasting - here

                              sub'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              sum

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                              => Tensor v'1 t

                              input: The tensor to reduce.

                              -> Tensor v'2 tidx

                              reduction_indices: The dimensions to reduce.

                              -> Tensor Build t

                              output: The reduced tensor.

                              Computes the sum of elements across dimensions of a tensor.

                              Reduces input along the dimensions given in reduction_indices. Unless + are checked during execution.

                              sparseToDense' #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) 
                              => OpParams 
                              -> Tensor v'1 tindices

                              sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete + index where `sparse_values[i]` will be placed.

                              -> Tensor v'2 tindices

                              output_shape: 1-D. Shape of the dense output tensor.

                              -> Tensor v'3 t

                              sparse_values: 1-D. Values corresponding to each row of sparse_indices, + or a scalar value to be used for all sparse indices.

                              -> Tensor v'4 t

                              default_value: Scalar value to set for indices not specified in + sparse_indices.

                              -> Tensor Build t

                              dense: Dense output tensor of shape output_shape.

                              sub #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              Returns x - y element-wise.

                              • NOTE*: Sub supports broadcasting. More about broadcasting + here

                              sub' #

                              Arguments

                              :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 t

                              y

                              -> Tensor Build t

                              z

                              sum #

                              Arguments

                              :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                              => Tensor v'1 t

                              input: The tensor to reduce.

                              -> Tensor v'2 tidx

                              reduction_indices: The dimensions to reduce.

                              -> Tensor Build t

                              output: The reduced tensor.

                              Computes the sum of elements across dimensions of a tensor.

                              Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

                              sum'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                              => OpParams 
                              -> Tensor v'1 t

                              input: The tensor to reduce.

                              -> Tensor v'2 tidx

                              reduction_indices: The dimensions to reduce.

                              -> Tensor Build t

                              output: The reduced tensor.

                              transpose

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 tperm

                              perm

                              -> Tensor Build t

                              y

                              Shuffle dimensions of x according to a permutation.

                              The output y has the same rank as x. The shapes of x and y satisfy: - `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`

                              transpose'

                              Arguments

                              :: forall (v'1 :: * -> *) (v'2 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 tperm

                              perm

                              -> Tensor Build t

                              y

                              truncatedNormal

                              Arguments

                              :: (MonadBuild m, OneOf `[Word16, Double, Float]` a) 
                              => Tensor v Int64

                              Shape.

                              -> m (Tensor Value a) 

                              Random tensor from the unit normal distribution with bounded values.

                              This is a type-restricted version of truncatedNormal.

                              truncatedNormal'

                              Arguments

                              :: (MonadBuild m, OneOf `[Word16, Double, Float]` a) 
                              => OpParams 
                              -> Tensor v Int64

                              Shape.

                              -> m (Tensor Value a) 

                              variable

                              Arguments

                              :: forall (m' :: * -> *). (MonadBuild m', TensorType dtype) 
                              => Shape

                              shape

                              -> m' (Tensor Ref dtype)

                              ref

                              Use VariableV2 instead.

                              variable'

                              Arguments

                              :: forall (m' :: * -> *). (MonadBuild m', TensorType dtype) 
                              => OpParams 
                              -> Shape

                              shape

                              -> m' (Tensor Ref dtype)

                              ref

                              vector :: TensorType a => [a] -> Tensor Build a

                              Create a constant vector.

                              vector' :: TensorType a => OpParams -> [a] -> Tensor Build a

                              zeros :: forall a. (Num a, TensorType a) => Shape -> Tensor Build a

                              zerosLike

                              Arguments

                              :: forall (v'1 :: * -> *). TensorType t 
                              => Tensor v'1 t

                              x: a tensor of type T.

                              -> Tensor Build t

                              y: a tensor of the same shape and type as x but filled with zeros.

                              Returns a tensor of zeros with the same shape and type as x.

                              zerosLike'

                              Arguments

                              :: forall (v'1 :: * -> *). TensorType t 
                              => OpParams 
                              -> Tensor v'1 t

                              x: a tensor of type T.

                              -> Tensor Build t

                              y: a tensor of the same shape and type as x but filled with zeros.

                              scalarize :: TensorType a => Tensor v a -> Tensor Build a

                              Reshape a N-D tensor down to a scalar.

                              See reshape.

                              \ No newline at end of file + retained with length 1.

                              sum' #

                              Arguments

                              :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
                              => OpParams 
                              -> Tensor v'1 t

                              input: The tensor to reduce.

                              -> Tensor v'2 tidx

                              reduction_indices: The dimensions to reduce.

                              -> Tensor Build t

                              output: The reduced tensor.

                              reduceSum :: OneOf '[Double, Float, Int32, Int64, Complex Float, Complex Double] a => Tensor v a -> Tensor Build a Source #

                              Sum a tensor down to a scalar + Seee sum

                              transpose #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) 
                              => Tensor v'1 t

                              x

                              -> Tensor v'2 tperm

                              perm

                              -> Tensor Build t

                              y

                              Shuffle dimensions of x according to a permutation.

                              The output y has the same rank as x. The shapes of x and y satisfy: + `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`

                              transpose' #

                              Arguments

                              :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) 
                              => OpParams 
                              -> Tensor v'1 t

                              x

                              -> Tensor v'2 tperm

                              perm

                              -> Tensor Build t

                              y

                              truncatedNormal Source #

                              Arguments

                              :: (MonadBuild m, OneOf '[Word16, Double, Float] a) 
                              => Tensor v Int64

                              Shape.

                              -> m (Tensor Value a) 

                              Random tensor from the unit normal distribution with bounded values.

                              This is a type-restricted version of truncatedNormal.

                              truncatedNormal' Source #

                              Arguments

                              :: (MonadBuild m, OneOf '[Word16, Double, Float] a) 
                              => OpParams 
                              -> Tensor v Int64

                              Shape.

                              -> m (Tensor Value a) 

                              variable #

                              Arguments

                              :: (MonadBuild m', TensorType dtype) 
                              => Shape

                              shape

                              -> m' (Tensor Ref dtype)

                              ref

                              Use VariableV2 instead.

                              variable' #

                              Arguments

                              :: (MonadBuild m', TensorType dtype) 
                              => OpParams 
                              -> Shape

                              shape

                              -> m' (Tensor Ref dtype)

                              ref

                              vector :: TensorType a => [a] -> Tensor Build a Source #

                              Create a constant vector.

                              zeros :: forall a. (Num a, TensorType a) => Shape -> Tensor Build a Source #

                              zerosLike #

                              Arguments

                              :: TensorType t 
                              => Tensor v'1 t

                              x: a tensor of type T.

                              -> Tensor Build t

                              y: a tensor of the same shape and type as x but filled with zeros.

                              Returns a tensor of zeros with the same shape and type as x.

                              zerosLike' #

                              Arguments

                              :: TensorType t 
                              => OpParams 
                              -> Tensor v'1 t

                              x: a tensor of type T.

                              -> Tensor Build t

                              y: a tensor of the same shape and type as x but filled with zeros.

                              scalarize :: TensorType a => Tensor v a -> Tensor Build a Source #

                              Reshape a N-D tensor down to a scalar.

                              See reshape.

                              Orphan instances

                              (TensorType a, Num a, (~) (* -> *) v Build, OneOf ((:) * Double ((:) * Float ((:) * Int32 ((:) * Int64 ((:) * (Complex Float) ((:) * (Complex Double) ([] *))))))) a) => Num (Tensor v a) Source #

                              Must be defined as an orphan because of the dependency order between Ops + and Tensor.

                              The indirect constraint "v ~ Value" helps disambiguate types, for example in + "neg 1 :: Tensor Value Float", it helps find the type of the subexpression + "1".

                              Methods

                              (+) :: Tensor v a -> Tensor v a -> Tensor v a #

                              (-) :: Tensor v a -> Tensor v a -> Tensor v a #

                              (*) :: Tensor v a -> Tensor v a -> Tensor v a #

                              negate :: Tensor v a -> Tensor v a #

                              abs :: Tensor v a -> Tensor v a #

                              signum :: Tensor v a -> Tensor v a #

                              fromInteger :: Integer -> Tensor v a #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Queue.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Queue.html new file mode 100644 index 0000000..eb78601 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Queue.html @@ -0,0 +1,8 @@ +TensorFlow.Queue

                              tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Queue

                              Description

                              Queues in TensorFlow graph. Very limited support for now.

                              Synopsis

                              Documentation

                              data Queue as Source #

                              A queue carrying tuples.

                              makeQueue Source #

                              Arguments

                              :: (MonadBuild m, TensorTypes as) 
                              => Int64

                              The upper bound on the number of elements in + this queue. Negative numbers mean no limit.

                              -> ByteString

                              If non-empty, this queue will be shared + under the given name across multiple sessions.

                              -> m (Queue as) 

                              Creates a new queue with the given capacity and shared name.

                              enqueue :: forall as v m. (MonadBuild m, TensorTypes as) => Queue as -> TensorList v as -> m ControlNode Source #

                              Adds the given values to the queue.

                              dequeue Source #

                              Arguments

                              :: (MonadBuild m, TensorTypes as) 
                              => Queue as 
                              -> m (TensorList Value as)

                              Dequeued tensors. They are coupled in a sense + that values appear together, even if they are + not consumed together.

                              Retrieves the values from the queue.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Variable.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Variable.html new file mode 100644 index 0000000..85d0450 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Variable.html @@ -0,0 +1,19 @@ +TensorFlow.Variable

                              tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Variable

                              Description

                              An implementation of ResourceHandle-based variables.

                              The main difference between this and Ref-based variables is + that reads are explicit, via the readValue op.

                              TODO: given that distinction, figure out a good story around + gradients and save/restore. Then, merge this module into + TensorFlow.Ops.

                              Synopsis

                              Documentation

                              variable :: (MonadBuild m, TensorType a) => Shape -> m (Variable a) Source #

                              Creates a new, uninitialized variable.

                              variable' :: forall m a. (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Variable a) Source #

                              readValue :: TensorType a => Variable a -> Tensor Build a Source #

                              Gets the value stored in a variable.

                              Note that this op is stateful since it depends on the value of the variable; + however, it may be CSE'd with other reads in the same context. The context can + be fixed by using render along with (for example) withControlDependencies. + For example:

                                runSession $ do
                              +    v <- variable []
                              +    a <- assign v 24
                              +    r <- withControlDependencies a $ render $ readValue v + 18
                              +    result <- run r
                              +    liftIO $ (42 :: Float) @=? unScalar result

                              initializedValue :: Variable a -> Maybe (Tensor Value a) Source #

                              The initial value of a Variable created with initializedVariable.

                              initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Variable a) Source #

                              Creates a variable initialized to the given value. + Initialization happens next time session runs.

                              initializedVariable' :: forall a m v. (MonadBuild m, TensorType a) => OpParams -> Tensor v a -> m (Variable a) Source #

                              zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Variable a) Source #

                              Creates a zero-initialized variable with the given shape.

                              assign :: (MonadBuild m, TensorType a) => Variable a -> Tensor v a -> m ControlNode Source #

                              Sets the value of a variable.

                              assignAdd :: (MonadBuild m, TensorType a) => Variable a -> Tensor v a -> m ControlNode Source #

                              Increments the value of a variable.

                              resourceApplyAdam Source #

                              Arguments

                              :: (MonadBuild m, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                              => Variable t

                              var: Should be from a Variable().

                              -> Variable t

                              m: Should be from a Variable().

                              -> Variable t

                              v: Should be from a Variable().

                              -> Tensor v1 t

                              beta1_power: Must be a scalar.

                              -> Tensor v2 t

                              beta2_power: Must be a scalar.

                              -> Tensor v3 t

                              lr: Scaling factor. Must be a scalar.

                              -> Tensor v4 t

                              beta1: Momentum factor. Must be a scalar.

                              -> Tensor v5 t

                              beta2: Momentum factor. Must be a scalar.

                              -> Tensor v6 t

                              epsilon: Ridge term. Must be a scalar.

                              -> Tensor v7 t

                              grad: The gradient.

                              -> m ControlNode 

                              Update '*var' according to the Adam algorithm.

                              lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) + m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t + v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t + variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)

                              resourceApplyAdam' Source #

                              Arguments

                              :: (MonadBuild m, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) 
                              => OpParams 
                              -> Variable t

                              var: Should be from a Variable().

                              -> Variable t

                              m: Should be from a Variable().

                              -> Variable t

                              v: Should be from a Variable().

                              -> Tensor v1 t

                              beta1_power: Must be a scalar.

                              -> Tensor v2 t

                              beta2_power: Must be a scalar.

                              -> Tensor v3 t

                              lr: Scaling factor. Must be a scalar.

                              -> Tensor v4 t

                              beta1: Momentum factor. Must be a scalar.

                              -> Tensor v5 t

                              beta2: Momentum factor. Must be a scalar.

                              -> Tensor v6 t

                              epsilon: Ridge term. Must be a scalar.

                              -> Tensor v7 t

                              grad: The gradient.

                              -> m ControlNode 
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html index d766ab4..e4802d2 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings. (Index)

                              tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              Index

                              absTensorFlow.Ops
                              abs'TensorFlow.Ops
                              addTensorFlow.Ops
                              add'TensorFlow.Ops
                              addNTensorFlow.Ops
                              addN'TensorFlow.Ops
                              argMaxTensorFlow.Ops
                              argMax'TensorFlow.Ops
                              assignTensorFlow.Ops
                              assign'TensorFlow.Ops
                              broadcastGradientArgsTensorFlow.Ops
                              broadcastGradientArgs'TensorFlow.Ops
                              castTensorFlow.Ops
                              cast'TensorFlow.Ops
                              concatTensorFlow.Ops
                              concat'TensorFlow.Ops
                              constantTensorFlow.Ops
                              constant'TensorFlow.Ops
                              embeddingLookupTensorFlow.EmbeddingOps
                              equalTensorFlow.Ops
                              equal'TensorFlow.Ops
                              expandDimsTensorFlow.Ops
                              expandDims'TensorFlow.Ops
                              fillTensorFlow.Ops
                              fill'TensorFlow.Ops
                              gradientsTensorFlow.Gradient
                              identityTensorFlow.Ops
                              identity'TensorFlow.Ops
                              initializedVariableTensorFlow.Ops
                              initializedVariable'TensorFlow.Ops
                              matMulTensorFlow.Ops
                              matMul'TensorFlow.Ops
                              matTransposeTensorFlow.Ops
                              matTranspose'TensorFlow.Ops
                              meanTensorFlow.Ops
                              mean'TensorFlow.Ops
                              mulTensorFlow.Ops
                              mul'TensorFlow.Ops
                              negTensorFlow.Ops
                              neg'TensorFlow.Ops
                              oneHotTensorFlow.Ops
                              oneHot'TensorFlow.Ops
                              packTensorFlow.Ops
                              pack'TensorFlow.Ops
                              placeholderTensorFlow.Ops
                              placeholder'TensorFlow.Ops
                              rangeTensorFlow.Ops
                              range'TensorFlow.Ops
                              reducedShapeTensorFlow.Ops
                              reluTensorFlow.Ops
                              relu'TensorFlow.Ops
                              reluGradTensorFlow.Ops
                              reluGrad'TensorFlow.Ops
                              reshapeTensorFlow.Ops
                              reshape'TensorFlow.Ops
                              restoreTensorFlow.Ops
                              restoreFromNameTensorFlow.Ops
                              saveTensorFlow.Ops
                              scalarTensorFlow.Ops
                              scalar'TensorFlow.Ops
                              scalarizeTensorFlow.Ops
                              shapeTensorFlow.Ops
                              shape'TensorFlow.Ops
                              signTensorFlow.Ops
                              sign'TensorFlow.Ops
                              sizeTensorFlow.Ops
                              size'TensorFlow.Ops
                              softmaxTensorFlow.Ops
                              softmax'TensorFlow.Ops
                              softmaxCrossEntropyWithLogitsTensorFlow.Ops
                              softmaxCrossEntropyWithLogits'TensorFlow.Ops
                              sparseToDenseTensorFlow.Ops
                              sparseToDense'TensorFlow.Ops
                              subTensorFlow.Ops
                              sub'TensorFlow.Ops
                              sumTensorFlow.Ops
                              sum'TensorFlow.Ops
                              transposeTensorFlow.Ops
                              transpose'TensorFlow.Ops
                              truncatedNormalTensorFlow.Ops
                              truncatedNormal'TensorFlow.Ops
                              variableTensorFlow.Ops
                              variable'TensorFlow.Ops
                              vectorTensorFlow.Ops
                              vector'TensorFlow.Ops
                              zeroInitializedVariableTensorFlow.Ops
                              zeroInitializedVariable'TensorFlow.Ops
                              zerosTensorFlow.Ops
                              zerosLikeTensorFlow.Ops
                              zerosLike'TensorFlow.Ops
                              \ No newline at end of file +

                              tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              Index

                              absTensorFlow.Ops
                              abs'TensorFlow.Ops
                              adamTensorFlow.Minimize
                              adam'TensorFlow.Minimize
                              adamBeta1TensorFlow.Minimize
                              adamBeta2TensorFlow.Minimize
                              AdamConfig 
                              1 (Type/Class)TensorFlow.Minimize
                              2 (Data Constructor)TensorFlow.Minimize
                              adamEpsilonTensorFlow.Minimize
                              adamLearningRateTensorFlow.Minimize
                              addTensorFlow.Ops
                              add'TensorFlow.Ops
                              addNTensorFlow.Ops
                              addN'TensorFlow.Ops
                              argMaxTensorFlow.Ops
                              argMax'TensorFlow.Ops
                              assign 
                              1 (Function)TensorFlow.Ops
                              2 (Function)TensorFlow.Variable
                              assign' 
                              1 (Function)TensorFlow.Ops
                              2 (Function)TensorFlow.Variable
                              assignAddTensorFlow.Variable
                              assignAdd'TensorFlow.Variable
                              broadcastGradientArgsTensorFlow.Ops
                              broadcastGradientArgs'TensorFlow.Ops
                              castTensorFlow.Ops
                              cast'TensorFlow.Ops
                              concatTensorFlow.Ops
                              concat'TensorFlow.Ops
                              constantTensorFlow.Ops
                              constant'TensorFlow.Ops
                              dequeueTensorFlow.Queue
                              embeddingLookupTensorFlow.EmbeddingOps
                              enqueueTensorFlow.Queue
                              equalTensorFlow.Ops
                              equal'TensorFlow.Ops
                              expandDimsTensorFlow.Ops
                              expandDims'TensorFlow.Ops
                              fillTensorFlow.Ops
                              fill'TensorFlow.Ops
                              GradientCompatibleTensorFlow.Gradient
                              gradientDescentTensorFlow.Minimize
                              gradientsTensorFlow.Gradient
                              identityTensorFlow.Ops
                              identity'TensorFlow.Ops
                              initializedValueTensorFlow.Variable
                              initializedVariable 
                              1 (Function)TensorFlow.Ops
                              2 (Function)TensorFlow.Variable
                              initializedVariable' 
                              1 (Function)TensorFlow.Ops
                              2 (Function)TensorFlow.Variable
                              makeQueueTensorFlow.Queue
                              matMulTensorFlow.Ops
                              matMul'TensorFlow.Ops
                              matTransposeTensorFlow.Ops
                              matTranspose'TensorFlow.Ops
                              meanTensorFlow.Ops
                              mean'TensorFlow.Ops
                              MinimizerTensorFlow.Minimize
                              minimizeWithTensorFlow.Minimize
                              mulTensorFlow.Ops
                              mul'TensorFlow.Ops
                              negTensorFlow.Ops
                              neg'TensorFlow.Ops
                              oneHotTensorFlow.Ops
                              oneHot'TensorFlow.Ops
                              packTensorFlow.Ops
                              pack'TensorFlow.Ops
                              placeholderTensorFlow.Ops
                              placeholder'TensorFlow.Ops
                              QueueTensorFlow.Queue
                              rangeTensorFlow.Ops
                              range'TensorFlow.Ops
                              readValueTensorFlow.Variable
                              reducedShapeTensorFlow.Ops
                              reduceMeanTensorFlow.Ops
                              reduceMean'TensorFlow.Ops
                              reduceSumTensorFlow.Ops
                              reduceSum'TensorFlow.Ops
                              reluTensorFlow.Ops
                              relu'TensorFlow.Ops
                              reluGradTensorFlow.Ops
                              reluGrad'TensorFlow.Ops
                              reshapeTensorFlow.Ops
                              reshape'TensorFlow.Ops
                              resourceApplyAdamTensorFlow.Variable
                              resourceApplyAdam'TensorFlow.Variable
                              restoreTensorFlow.Ops
                              restoreFromNameTensorFlow.Ops
                              saveTensorFlow.Ops
                              scalarTensorFlow.Ops
                              scalar'TensorFlow.Ops
                              scalarizeTensorFlow.Ops
                              shapeTensorFlow.Ops
                              shape'TensorFlow.Ops
                              sigmoidCrossEntropyWithLogitsTensorFlow.NN
                              signTensorFlow.Ops
                              sign'TensorFlow.Ops
                              sizeTensorFlow.Ops
                              size'TensorFlow.Ops
                              softmaxTensorFlow.Ops
                              softmax'TensorFlow.Ops
                              softmaxCrossEntropyWithLogitsTensorFlow.Ops
                              softmaxCrossEntropyWithLogits'TensorFlow.Ops
                              sparseToDenseTensorFlow.Ops
                              sparseToDense'TensorFlow.Ops
                              subTensorFlow.Ops
                              sub'TensorFlow.Ops
                              sumTensorFlow.Ops
                              sum'TensorFlow.Ops
                              transposeTensorFlow.Ops
                              transpose'TensorFlow.Ops
                              truncatedNormalTensorFlow.Ops
                              truncatedNormal'TensorFlow.Ops
                              VariableTensorFlow.Variable
                              variable 
                              1 (Function)TensorFlow.Ops
                              2 (Function)TensorFlow.Variable
                              variable' 
                              1 (Function)TensorFlow.Ops
                              2 (Function)TensorFlow.Variable
                              vectorTensorFlow.Ops
                              vector'TensorFlow.Ops
                              zeroInitializedVariable 
                              1 (Function)TensorFlow.Ops
                              2 (Function)TensorFlow.Variable
                              zeroInitializedVariable' 
                              1 (Function)TensorFlow.Ops
                              2 (Function)TensorFlow.Variable
                              zerosTensorFlow.Ops
                              zerosLikeTensorFlow.Ops
                              zerosLike'TensorFlow.Ops
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/frames.html b/docs/haddock/tensorflow-ops-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-ops-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-ops-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-ops-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-ops-0.1.0.0/index-frames.html deleted file mode 100644 index e1ed028..0000000 --- a/docs/haddock/tensorflow-ops-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings. \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/index.html b/docs/haddock/tensorflow-ops-0.1.0.0/index.html index ef676e3..4cee320 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              Please see README.md

                              \ No newline at end of file +

                              tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

                              Please see README.md

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-EmbeddingOps.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-EmbeddingOps.html index 6bb59df..1e2e545 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-EmbeddingOps.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-EmbeddingOps.html @@ -1,4 +1,4 @@ -TensorFlow.EmbeddingOps

                              TensorFlow.EmbeddingOps

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Gradient.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Gradient.html index d309aff..4110fea 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Gradient.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Gradient.html @@ -1,4 +1,4 @@ -TensorFlow.Gradient

                              TensorFlow.Gradient

                              \ No newline at end of file +

                              TensorFlow.Gradient

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Minimize.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Minimize.html new file mode 100644 index 0000000..ad2423b --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Minimize.html @@ -0,0 +1,4 @@ +TensorFlow.Minimize

                              TensorFlow.Minimize

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/mini_TensorFlow-NN.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-NN.html similarity index 79% rename from docs/haddock/tensorflow-nn-0.1.0.0/mini_TensorFlow-NN.html rename to docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-NN.html index 148d202..f606ee9 100644 --- a/docs/haddock/tensorflow-nn-0.1.0.0/mini_TensorFlow-NN.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-NN.html @@ -1,4 +1,4 @@ -TensorFlow.NN

                              TensorFlow.NN

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html index 181e279..96d063a 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html @@ -1,4 +1,4 @@ -TensorFlow.Ops

                              TensorFlow.Ops

                              \ No newline at end of file +

                              TensorFlow.Ops

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Queue.html similarity index 84% rename from docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html rename to docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Queue.html index ee5969c..c5fc714 100644 --- a/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Queue.html @@ -1,4 +1,4 @@ -TensorFlow.Queue

                              TensorFlow.Queue

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Variable.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Variable.html new file mode 100644 index 0000000..c398ea4 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Variable.html @@ -0,0 +1,4 @@ +TensorFlow.Variable

                              TensorFlow.Variable

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/ocean.css b/docs/haddock/tensorflow-ops-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-ops-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ -424,30 +435,31 @@ 
div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.EmbeddingOps.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.EmbeddingOps.html new file mode 100644 index 0000000..cfc5062 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.EmbeddingOps.html @@ -0,0 +1,92 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +{-# LANGUAGE ConstraintKinds #-}
                              +{-# LANGUAGE DataKinds #-}
                              +{-# LANGUAGE FlexibleContexts #-}
                              +{-# LANGUAGE NoMonomorphismRestriction #-}
                              +{-# LANGUAGE OverloadedStrings #-}
                              +{-# LANGUAGE RankNTypes #-}
                              +
                              +-- | Parallel lookups on the list of tensors.
                              +module TensorFlow.EmbeddingOps where
                              +
                              +import Control.Monad (zipWithM)
                              +import Data.Int (Int32, Int64)
                              +import TensorFlow.Build (MonadBuild)
                              +import TensorFlow.Ops (shape, vector)  -- Also Num instance for Tensor
                              +import TensorFlow.Tensor (Tensor, Value, Rendered, colocateWith, render)
                              +import TensorFlow.Types (OneOf, TensorType)
                              +import qualified TensorFlow.GenOps.Core as CoreOps
                              +
                              +-- | Looks up `ids` in a list of embedding tensors.
                              +--
                              +-- This function is used to perform parallel lookups on the list of
                              +-- tensors in `params`.  It is a generalization of `TF.gather`, where
                              +-- `params` is interpreted as a partition of a larger embedding
                              +-- tensor.
                              +--
                              +-- The partition_strategy is "mod", we assign each id to partition
                              +-- `p = id % len(params)`. For instance,
                              +-- 13 ids are split across 5 partitions as:
                              +-- `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
                              +--
                              +-- The results of the lookup are concatenated into a dense
                              +-- tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
                              +embeddingLookup :: forall a b v1 v2 m .
                              +                   ( MonadBuild m
                              +                   , Rendered (Tensor v1)
                              +                   , TensorType a
                              +                   , OneOf '[Int64, Int32] b
                              +                   , Num b
                              +                   )
                              +                => [Tensor v1 a]
                              +                -- ^ A list of tensors which can be concatenated along
                              +                -- dimension 0. Each `Tensor` must be appropriately
                              +                -- sized for `mod` partition strategy.
                              +                -> Tensor v2 b
                              +                -- ^ A `Tensor` with type `int32` or `int64`
                              +                -- containing the ids to be looked up in `params`.
                              +                -- The ids are required to have fewer than 2^31
                              +                -- entries.
                              +                -> m (Tensor Value a)
                              +                -- ^ A dense tensor with shape `shape(ids) + shape(params)[1:]`.
                              +embeddingLookup [p0] ids = colocateWith p0 (render $ CoreOps.gather p0 ids)
                              +embeddingLookup params@(p0 : _) ids = do
                              +    -- Do np separate lookups, finding embeddings for plist[p] in params[p]
                              +    partitionedResult <- zipWithM
                              +                        (\p g -> colocateWith p $ render $ CoreOps.gather p g)
                              +                        params gatherIds
                              +    let unshapedResult = CoreOps.dynamicStitch pindices partitionedResult
                              +    -- Shape restoration is not as optimal as it would be with client
                              +    -- side shape tracking.
                              +    paramShape <- colocateWith p0 (render (shape p0))
                              +    let finalShape = CoreOps.concat 0 [shape ids, tailShape]
                              +        tailShape = CoreOps.slice paramShape (singleton 1) (singleton (-1))
                              +    render $ CoreOps.reshape unshapedResult finalShape
                              +  where
                              +    -- Avoids genericLength here which would be evaluated by TF.
                              +    np = fromIntegral (length params)
                              +    flatIds = CoreOps.reshape ids (singleton (-1))
                              +    pAssignments = CoreOps.cast (flatIds `CoreOps.mod` np)
                              +    newIds = flatIds `CoreOps.div` np
                              +    originalIndices = CoreOps.range 0 (CoreOps.size flatIds) 1
                              +    -- Partition list of ids based on assignments into np separate lists
                              +    gatherIds = CoreOps.dynamicPartition np newIds pAssignments
                              +    -- Similarly, partition the original indices.
                              +    pindices = CoreOps.dynamicPartition np originalIndices pAssignments
                              +    singleton i = vector [i :: Int32]
                              +
                              +embeddingLookup [] _ = error "embeddingLookup requires params to be non empty"
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Gradient.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Gradient.html new file mode 100644 index 0000000..b6d0408 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Gradient.html @@ -0,0 +1,856 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +{-# LANGUAGE ConstraintKinds #-}
                              +{-# LANGUAGE DataKinds #-}
                              +{-# LANGUAGE FlexibleContexts #-}
                              +{-# LANGUAGE OverloadedStrings #-}
                              +{-# LANGUAGE RankNTypes #-}
                              +{-# LANGUAGE ScopedTypeVariables #-}
                              +{-# LANGUAGE TypeFamilies #-}
                              +{-# LANGUAGE ViewPatterns #-}
                              +
                              +module TensorFlow.Gradient
                              +    ( GradientCompatible
                              +    , gradients
                              +    ) where
                              +
                              +import Control.Monad (forM, zipWithM)
                              +import Control.Monad.State.Strict (State, evalState, gets, modify)
                              +import Data.ByteString (ByteString)
                              +import Data.Complex (Complex)
                              +import Data.Default (def)
                              +import Data.Int (Int32, Int64)
                              +import Data.Foldable (foldlM)
                              +import Data.List (foldl', sortBy)
                              +import Data.Map.Strict (Map)
                              +import Data.Maybe (fromMaybe, maybeToList, mapMaybe)
                              +import Data.Ord (comparing)
                              +import Data.ProtoLens.TextFormat (showMessage)
                              +import Data.Set (Set)
                              +import Data.Text (Text)
                              +import Data.Tuple (swap)
                              +import Lens.Family2 (Lens', view, (&), (^.), (.~), (%~))
                              +import Lens.Family2.State.Strict (uses)
                              +import Lens.Family2.Stock (at, intAt)
                              +import Lens.Family2.Unchecked (lens, iso)
                              +import Prelude hiding (sum)
                              +import Text.Printf (printf)
                              +import qualified Data.Graph.Inductive.Basic as FGL
                              +import qualified Data.Graph.Inductive.Graph as FGL
                              +import qualified Data.Graph.Inductive.PatriciaTree as FGL
                              +import qualified Data.Graph.Inductive.Query.DFS as FGL
                              +import qualified Data.IntMap.Strict as IntMap
                              +import qualified Data.Map.Strict as Map
                              +import qualified Data.Set as Set
                              +import qualified Data.Text as Text
                              +
                              +import qualified TensorFlow.GenOps.Core as CoreOps
                              +import TensorFlow.Build
                              +    ( MonadBuild
                              +    , Build
                              +    , build
                              +    , renderedNodeDefs
                              +    , opDef
                              +    , opAttr
                              +    , opInputs
                              +    )
                              +import TensorFlow.BuildOp
                              +import TensorFlow.Ops
                              +    ( addN
                              +    , broadcastGradientArgs
                              +    , expandDims
                              +    , fill
                              +    , matMul
                              +    , matMul'
                              +    , reducedShape
                              +    , reluGrad
                              +    , reshape
                              +    , scalar
                              +    , shape
                              +    , softmaxCrossEntropyWithLogits
                              +    , sum
                              +    , scalarize
                              +    , vector
                              +    , zerosLike
                              +    )
                              +import TensorFlow.Output
                              +    ( NodeName(..)
                              +    , Output(..)
                              +    , OutputIx(..)
                              +    , outputIndex
                              +    )
                              +import TensorFlow.Tensor
                              +    ( Tensor(..)
                              +    , Value
                              +    , render
                              +    , expr
                              +    , Rendered
                              +    , tensorNodeName
                              +    , renderedOutput
                              +    , renderValue
                              +    , ToTensor(..)
                              +    )
                              +import TensorFlow.Types (Attribute, OneOf, TensorType, attrLens)
                              +import Proto.Tensorflow.Core.Framework.NodeDef
                              +    (NodeDef, attr, input, op, name)
                              +
                              +type GradientCompatible a =
                              +    -- TODO(fmayle): MaxPoolGrad doesn't support Double for some reason.
                              +    (Num a, OneOf '[ Float, Complex Float, Complex Double ] a)
                              +
                              +-- TODO(fmayle): Support control flow.
                              +-- TODO(fmayle): Support gate_gradients-like option to avoid race conditions.
                              +-- TODO(fmayle): Do we need to consider control inputs? See _PendingCount in
                              +-- tensorflow/python/ops/gradients.py.
                              +-- TODO(fmayle): Maybe store the gradient functions and numOutputs on the OpDef.
                              +
                              +
                              +-- | Gradient of @y@ w.r.t. each element of @xs@.
                              +gradients :: forall a v1 t m . ( MonadBuild m
                              +                               , Rendered t
                              +                               , ToTensor t
                              +                               , GradientCompatible a
                              +                               )
                              +          => Tensor v1 a  -- ^ The output of the graph.
                              +          -> [t a]        -- ^ Tensors for which gradients are computed.
                              +          -> m [Tensor Value a]
                              +gradients y xs = build $ do
                              +    -- The gradients are computed using "reverse accumulation", similarly to
                              +    -- what is described here:
                              +    -- https://en.wikipedia.org/wiki/Automatic_differentiation#The_chain_rule.2C_forward_and_reverse_accumulation
                              +    --
                              +    -- The code is summarised as follows:
                              +    --
                              +    -- 1. Create an fgl graph of the relevant nodes (ops) and edges (tensors).
                              +    -- 2. Initialize the gradient of y to 1 (∂y/∂y = 1) and the rest of tensor's
                              +    --    gradients to nothing.
                              +    -- 3. Process the nodes in reverse topological order (i.e. each node comes
                              +    --    after all of its outputs so that the output gradients for a node have
                              +    --    been completely calculated before it is processed):
                              +    --      a. Record the gradient for each of the node's output tensors (∂y/∂w
                              +    --         for each output tensor w).
                              +    --      b. Calculate the gradient of y w.r.t. each of the node's input
                              +    --         tensors using the gradients of the node's output tensors.
                              +    --
                              +    --         Written differently, for each output tensor w and input tensor v:
                              +    --           ∂y/∂w = ...            (calculated in previous steps)
                              +    --           ∂w/∂v = ...            (op specific)
                              +    --           ∂y/∂v = ∂y/∂w * ∂w/∂v  (technically, if tensor v is an input
                              +    --                                   to multiple nodes, then this is only
                              +    --                                   part of ∂y/∂v)
                              +    --
                              +    -- 4. Lookup the recorded gradient for each x in xs.
                              +
                              +    y' <- renderValue y
                              +    let yName = tensorNodeName y'
                              +    yOne <- render $ fill (shape y') (scalar 1)
                              +    -- TODO(fmayle): Move this into Build.hs and call it unsafeNodeDefFromName?
                              +    nodeDefLookup :: (NodeName -> NodeDef) <- uses renderedNodeDefs $
                              +        (\f x -> fromMaybe (error $ "no NodeDef found for " ++ show x) (f x))
                              +        . flip Map.lookup
                              +    let (gr, nodeMap) = createGraph yName nodeDefLookup
                              +    -- Set gradient of y to one.
                              +    -- TODO: nicer
                              +    let initPending :: Map.Map FGL.Node (PendingGradients a)
                              +            = Map.empty & (at (nodeMap Map.! yName)
                              +                                . nonEmpty
                              +                                . outputIxAt (outputIndex $ renderedOutput y')
                              +                                . nonEmpty
                              +                                .~ [yOne]
                              +                                )
                              +    -- Calculate the gradients of y w.r.t. each node in the graph.
                              +    gradientMap <- graphGrads gr initPending
                              +    -- Lookup the gradients for each x.
                              +    forM xs $ \x ->
                              +        let Output i xName = renderedOutput x
                              +        in maybe (render $ zerosLike $ toTensor x) return $ do
                              +            n <- nodeMap ^. at xName
                              +            gradientMap ^. at n . nonEmpty . outputIxAt i
                              +
                              +outputIxAt :: OutputIx -> Lens' (IntMap.IntMap v) (Maybe v)
                              +outputIxAt = intAt . unOutputIx
                              +
                              +-- | Incomplete gradients of a node's outputs.
                              +--
                              +-- The lists represent partial sums. The key is an OutputIx sans newtype.
                              +type PendingGradients a = IntMap.IntMap [Tensor Value a]
                              +
                              +-- | Gradients of a node's outputs. The key is an OutputIx sans newtype.
                              +-- TODO: precache the rendering?
                              +type Gradients a = IntMap.IntMap (Tensor Value a)
                              +
                              +-- | Graph of TensorFlow operations.
                              +type Graph = FGL.Gr NodeDef EdgeLabel
                              +
                              +-- | Data associated with an edge.
                              +--
                              +-- Pair of
                              +--   1. Output index of a tensor from the source node.
                              +--   2. Input index that the tensor connects to on the destination node.
                              +type EdgeLabel = (OutputIx, OutputIx)
                              +
                              +
                              +-- | State used for calculating gradients.
                              +data GradientsState a = GradientsState
                              +                      { _gradientsPending :: !(Map FGL.Node (PendingGradients a))
                              +                      , _gradientsResult  :: !(Map FGL.Node (Gradients a))
                              +                      }
                              +
                              +gradientsPending :: Lens' (GradientsState a) (Map FGL.Node (PendingGradients a))
                              +gradientsPending = lens _gradientsPending (\x y -> x { _gradientsPending = y })
                              +
                              +gradientsResult :: Lens' (GradientsState a) (Map FGL.Node (Gradients a))
                              +gradientsResult = lens _gradientsResult (\x y -> x { _gradientsResult = y })
                              +
                              +
                              +-- TODO(fmayle): Use something like Data.List.Safe.
                              +-- | Safe version of (!!).
                              +safeIndex :: [a] -> Int -> Maybe a
                              +_      `safeIndex` n | n < 0 = Nothing
                              +[]     `safeIndex` _         = Nothing
                              +(x:_)  `safeIndex` 0         = Just x
                              +(_:xs) `safeIndex` n         = xs `safeIndex` (n-1)
                              +
                              +-- Copy of http://hackage.haskell.org/package/lens-3.9.0.2/docs/Control-Lens-Iso.html#v%3anon
                              +anon :: a -> (a -> Bool) -> Lens' (Maybe a) a
                              +anon a p = iso (fromMaybe a) go where
                              +  go b | p b       = Nothing
                              +       | otherwise = Just b
                              +
                              +non :: Eq a => a -> Lens' (Maybe a) a
                              +non a = anon a (a==)
                              +
                              +-- | Lens that defaults Nothing to mempty.
                              +nonEmpty :: (Monoid (t v), Foldable t) => Lens' (Maybe (t v)) (t v)
                              +nonEmpty = anon mempty null
                              +
                              +-- TODO: strictness (e.g., foldlM')
                              +
                              +-- | Calculate the gradients for every node in a graph.
                              +graphGrads :: forall a. GradientCompatible a
                              +           => Graph
                              +           -> Map FGL.Node (PendingGradients a)
                              +           -- ^ Initial gradients (usually just 1 for the node of interest).
                              +           -> Build (Map FGL.Node (Gradients a))
                              +graphGrads gr initPending = view gradientsResult <$> foldlM go initState nodeOrder
                              +  where
                              +    initState = GradientsState initPending Map.empty
                              +    -- Reverse topological sort.
                              +    -- TODO(fmayle): Filter out nodes that are not successors of any x in xs to
                              +    -- avoid calculating gradients that won't be used.
                              +    nodeOrder = FGL.topsort $ FGL.grev gr
                              +    go :: GradientsState a -> Int -> Build (GradientsState a)
                              +    go state node = do
                              +        -- Aggregate the accumulated gradients for this node.
                              +        outputGrads <-
                              +                sumPendingGradient (state ^. gradientsPending . at node . nonEmpty)
                              +        if null outputGrads
                              +           then pure state
                              +           else do
                              +              let ctx = FGL.context gr node
                              +              inputGrads <- calculateInputGrads ctx outputGrads gr
                              +              -- Calculate the gradients for each of the node's inputs.
                              +              let nextState = state & gradientsResult %~ Map.insert node outputGrads
                              +              pure $ updatePendingGradients ctx inputGrads nextState
                              +
                              +-- | Reduce accumulated gradients for each output to one Tensor.
                              +sumPendingGradient :: GradientCompatible a
                              +                   => PendingGradients a -> Build (Gradients a)
                              +sumPendingGradient = sequence . IntMap.mapMaybe f
                              +  where
                              +    f [] = Nothing
                              +    f [x] = Just (pure x)
                              +    f xs = Just (render $ addN xs)
                              +
                              +
                              +-- | Calculate the gradients of a node's input tensors.
                              +--
                              +-- This is mostly just a wrapper around opGrad.
                              +calculateInputGrads :: forall a. GradientCompatible a
                              +                    => FGL.Context NodeDef EdgeLabel
                              +                    -> Gradients a  -- ^ Output gradients of the node.
                              +                    -> Graph
                              +                    -> Build [Maybe (Tensor Value a)]
                              +calculateInputGrads (inputEdges, _, nodeDef, _) outputGrads gr = do
                              +    fullOutGrads <- fullOutputGrads (numOutputs nodeDef) (nodeDefName nodeDef)
                              +                        outputGrads
                              +    traverse (traverse render) $ opGrad (nodeDef ^. op) nodeDef inputTensors fullOutGrads
                              +  where
                              +    -- Create a tensor from an edge (technically an Output, but it seems less
                              +    -- confusing to refer to it as a tensor here).
                              +    edgeToTensor :: (EdgeLabel, FGL.Node) -> Output
                              +    edgeToTensor ((i, _), n) =
                              +        case FGL.lab gr n of
                              +            Just edgeNodeDef -> Output i (NodeName $ edgeNodeDef ^. name)
                              +            Nothing -> error $ "calculateInputGrads: missing input node for "
                              +                               ++ Text.unpack (nodeDef ^. name)
                              +    -- Input tensors, sorted by input index.
                              +    inputTensors = map edgeToTensor $ sortBy (comparing (snd . fst)) inputEdges
                              +
                              +-- | Convert a Map of gradients to a list, with zeros for missing outputs.
                              +fullOutputGrads :: (TensorType a, Num a)
                              +                => OutputIx  -- ^ Number of outputs.
                              +                -> NodeName
                              +                -> Gradients a
                              +                -> Build [Tensor Value a]
                              +fullOutputGrads n o gs =
                              +    mapM (\i -> maybe (render $ zero i) return (gs ^. outputIxAt i)) [0..n-1]
                              +  where
                              +    -- A tensor of zeros with the same shape as the i'th output.
                              +    zero i = zerosLike $ toT (Output i o)
                              +
                              +
                              +-- | Update the pending gradients of a node's inputs.
                              +updatePendingGradients :: forall a. (TensorType a, Num a)
                              +                       => FGL.Context NodeDef EdgeLabel
                              +                       -> [Maybe (Tensor Value a)]
                              +                       -- ^ Gradient of each input tensor.
                              +                       -> GradientsState a
                              +                       -> GradientsState a
                              +updatePendingGradients (inputEdges, _, nodeDef, _) inputGrads initState =
                              +    foldl' go initState inputEdges
                              +  where
                              +    go :: GradientsState a -> (EdgeLabel, FGL.Node) -> GradientsState a
                              +    go state ((outIndex, OutputIx inIndex), node) =
                              +        case maybeGradient of
                              +            Nothing -> state
                              +            Just g ->
                              +                -- Add to the list of pending gradients for this tensor.
                              +                state & gradientsPending
                              +                      . at node
                              +                      . nonEmpty
                              +                      . outputIxAt outIndex
                              +                      . nonEmpty
                              +                      %~ (g:)
                              +      where
                              +        badSizeErr = error $ printf "updatePendingGradients: bad input index \
                              +                                    \%d for inputGrads of length %d in %s"
                              +                                    inIndex (length inputGrads)
                              +                                    (show (nodeDef ^. name))
                              +        maybeGradient = fromMaybe badSizeErr (safeIndex inputGrads inIndex)
                              +
                              +
                              +-- | Create a graph that includes a node and its transitive dependencies.
                              +createGraph :: NodeName -> (NodeName -> NodeDef)
                              +            -> (Graph, Map NodeName FGL.Node)
                              +createGraph nodeName nodeDefLookup = (FGL.nmap nodeDefLookup graph, nodeMap)
                              +  where
                              +    -- Parse a tensor name.
                              +    parseTensorName :: Text -> Maybe (NodeName, OutputIx)
                              +    parseTensorName n
                              +        | Text.null n        = error "parseTensorName: empty name"
                              +        | Text.head n == '^' = Nothing  -- Control edge
                              +        | otherwise          =
                              +            let (nm, indexStr) = Text.breakOn ":" n
                              +                index | Text.null indexStr = 0
                              +                      | otherwise = read $ Text.unpack $ Text.tail indexStr
                              +            in Just (NodeName nm, OutputIx index)
                              +
                              +    -- Build a map from node name to outward edges.
                              +    --
                              +    -- The state is the set of visited nodes.
                              +    collect :: Maybe (NodeName, OutputIx, OutputIx)
                              +            -> NodeName
                              +            -> State (Set NodeName)
                              +                     (Map NodeName [(NodeName, OutputIx, OutputIx)])
                              +    collect outgoingEdge nm = do
                              +        let nextLookup = Map.singleton nm (maybeToList outgoingEdge)
                              +        seen <- gets (Set.member nm)
                              +        modify (Set.insert nm)
                              +        if seen
                              +            then pure nextLookup
                              +            else do
                              +                let inputs = nodeDefLookup nm ^. input
                              +                    recurse inIndex (parentName, outIndex) =
                              +                        collect (Just (nm, outIndex, inIndex)) parentName
                              +                subEdgeLookups <-
                              +                    zipWithM recurse [0..] $ mapMaybe parseTensorName inputs
                              +                pure $ Map.unionsWith (++) (nextLookup:subEdgeLookups)
                              +
                              +    edgeLookup = evalState (collect Nothing nodeName) Set.empty
                              +    -- Associate an ID with each node name.
                              +    nodeMap = Map.fromList $ zip (Map.keys edgeLookup) [0..]
                              +    -- Create the graph.
                              +    graph = FGL.mkGraph (swap <$> Map.toList nodeMap)
                              +                        [ (nodeMap Map.! n, nodeMap Map.! m, (i, j))
                              +                        | (n, edges) <- Map.toList edgeLookup
                              +                        , (m, i, j) <- edges
                              +                        ]
                              +
                              +-- | Function to compute the gradient of y w.r.t. each input.
                              +--
                              +-- Let y be an arbitrary tensor
                              +-- and [w_0, ..., w_n] be the output tensors of a node
                              +-- and [v_0, ..., v_n] be the input tensors of the same node.
                              +--
                              +-- Given [∂y/∂w_0, ..., ∂y/∂w_n] and [v_0, ..., v_n], a GradientFunc computes
                              +-- [∂y/∂v_0, ..., ∂y/∂v_n] for a particular op type.
                              +--
                              +-- A Nothing gradient is equivalent to zero (but allows for short circuiting
                              +-- computation when all the gradients for something are Nothing).
                              +type GradientFunc a = NodeDef
                              +                    -> [Output]
                              +                    -- ^ Input tensors.
                              +                    -> [Tensor Value a]
                              +                    -- ^ Gradient of y w.r.t. each output tensor.
                              +                    -> [Maybe (Tensor Build a)]
                              +                    -- ^ Gradient of y w.r.t. each input tensor.
                              +
                              +
                              +-- TODO(fmayle): Assert the type is correct.
                              +-- | Create a Tensor from an Output.
                              +toT :: Output -> Tensor Build a
                              +toT = Tensor . pure
                              +
                              +
                              +-- | Wrapper around `TensorFlow.GenOps.Core.slice` that builds vectors from scalars for
                              +-- simple slicing operations.
                              +flatSlice :: forall v1 t . TensorType t
                              +         => Tensor v1 t    -- ^ __input__
                              +         -> Int32          -- ^ __begin__: specifies the offset into the first dimension of
                              +                           -- 'input' to slice from.
                              +         -> Int32          -- ^ __size__: specifies the number of elements of the first dimension
                              +                           -- of 'input' to slice. If size is -1, all remaining elements in the dimension
                              +                           -- are included in the slice (i.e. this is equivalent to setting
                              +                           -- size = input.dim_size(0) - begin).
                              +         -> Tensor Build t -- ^ __output__
                              +flatSlice t begin size = CoreOps.slice t (vector [begin]) (vector [size])
                              +
                              +nodeDefName :: NodeDef -> NodeName
                              +nodeDefName = NodeName . view name
                              +
                              +-- | Gradient helper for binary component wise operations
                              +-- See https://github.com/tensorflow/tensorflow/blob/e9de087fa7f59c39bbe12ac2c83c5547c83f746c/tensorflow/core/ops/math_grad.cc#L329
                              +gradForBinaryCwise :: ( OneOf '[ Int32, Int64, Float, Double, Complex Float, Complex Double ] t
                              +                      )
                              +                   => (Tensor v1 t, Tensor v1 t)
                              +                   -> (Tensor v1 t, Tensor v1 t)
                              +                   -> [ Maybe (Tensor Build t) ]
                              +gradForBinaryCwise (x, gx) (y, gy) =
                              +    [ Just dx
                              +    , Just dy ]
                              +  where
                              +    dx = reshape (sum gx rx) sx
                              +    dy = reshape (sum gy ry) sy
                              +    sx = shape x
                              +    sy = shape y
                              +    (rx, ry) = broadcastGradientArgs sx sy
                              +
                              +-- | The gradient function for an op type.
                              +--
                              +-- These implementations should match their python counterparts in:
                              +-- third_party/tensorflow/python/ops/*_grad.py
                              +opGrad :: forall a . GradientCompatible a => Text -> GradientFunc a
                              +
                              +opGrad "Abs" _ [toT -> x] [dz] = [Just $ expr dz * signum x]
                              +opGrad "Neg" _ [_] [dz] = [Just $ negate $ expr dz]
                              +opGrad "Relu" _ [toT -> x] [dz] = [Just $ reluGrad dz x]
                              +opGrad "ReluGrad" _ [_, toT -> x ] [dz] = [Just $ reluGrad dz x, Just $ CoreOps.zerosLike x]
                              +
                              +opGrad "Concat" _ _ix [dy]
                              +    -- Concat concatenates input tensors
                              +    --   x1 of shape s1 = [k1, ..., ki_1, ..., kn]
                              +    --   x2 of shape s2 = [k1, ..., ki_2, ..., kn]
                              +    --    .           .     .          .        .
                              +    --    .           .     .          .        .
                              +    --    .           .     .          .        .
                              +    --   xm of shape sm = [k1, ..., ki_m, ..., kn]
                              +    --  along dimension i to an output tensor
                              +    --   y  of shape sy = [k1, ..., k, ..., kn]
                              +    --  where k = sum ki = sum [ki_1,...,ki_m]
                              +    --
                              +    --  The incoming gradient dy from backpropagation is
                              +    --   simply forwarded split across input tensors yielding dx.
                              +    --   Forwarded gradients have shapes s = [s1, ..., sm].
                              +    | m == 1    = Nothing : [Just $ expr dy]
                              +    | otherwise = Nothing : map Just (dx `reshapeZip` s)
                              +  where
                              +    reshapeZip = zipWith reshape
                              +    dx = CoreOps.splitV (fromIntegral m) dy ki _i
                              +    s  :: [Tensor Build Int32]
                              +    s  = map shape x
                              +    x  :: [Tensor Build a]
                              +    x  = map toT $ tail _ix
                              +    -- i: concat dimension. Adjusted modulo n to handle negative indices.
                              +    _i = toT (head _ix) `CoreOps.floorMod` n
                              +    i  = reshape _i $ vector [1 :: Int32]
                              +    -- sizes along concatenated dimension
                              +    ki :: Tensor Build Int32
                              +    ki = CoreOps.concat 0 $ map (\t -> CoreOps.slice t i $ vector [1 :: Int32]) s
                              +    m  = length x
                              +    n  = CoreOps.rank (head x)
                              +
                              +opGrad "Square" _ [toT -> x] [dz] =
                              +    -- TODO(fmayle): Handle complex numbers.
                              +    -- TODO(fmayle): The python code makes dz a control dependency of the 2*x
                              +    -- (for performance reasons?). Will need to put these functions in the Build
                              +    -- monad to replicate that.
                              +    [Just $ dz `CoreOps.mul` (2 * x)]
                              +
                              +opGrad "Gather" _ [toT -> x, toT -> indices] [dz] =
                              +    -- TODO(fmayle): The python version uses a better performance implementation
                              +    -- when the shape is known without having to run the graph.
                              +    -- TODO(fmayle): We shouldn't convert the result to a dense tensor. Sparse
                              +    -- tensor support will require some thinking.
                              +    [ Just $ CoreOps.unsortedSegmentSum values indices' numRows
                              +    , Nothing
                              +    ]
                              +  where
                              +    -- TODO(gnezdo): Use colocateWith but it requires Build monad.
                              +    denseShape = shape (x :: Tensor Build a)
                              +    numRows = scalarize $ flatSlice denseShape 0 1
                              +    valuesShape = CoreOps.concat 0 [ allDimensions
                              +                                   , flatSlice denseShape 1 (-1)
                              +                                   ]
                              +    values = reshape dz valuesShape
                              +    -- TODO(fmayle): This could be either Int32 or Int64.
                              +    indices' = reshape indices allDimensions :: Tensor Build Int32
                              +
                              +opGrad "Max" _ [toT -> x, toT -> indices] [dz] =
                              +    [Just $ indicators `CoreOps.div` numSelected * dz', Nothing]
                              +  where
                              +    sx = shape (x :: Tensor Build a)
                              +    outputShapeKeptDims = reducedShape sx (indices :: Tensor Build Int32)
                              +    y = CoreOps.max x indices
                              +    y' = reshape y outputShapeKeptDims
                              +    dz' = reshape dz outputShapeKeptDims
                              +    indicators = CoreOps.cast $ CoreOps.equal y' x
                              +    numSelected = reshape (sum indicators indices) outputShapeKeptDims
                              +
                              +-- Min and Max have identical gradient implementations.
                              +opGrad "Min" u v w = opGrad "Max" u v w
                              +
                              +-- Element wise maximum gradient
                              +-- See https://github.com/tensorflow/tensorflow/blob/e9de087fa7f59c39bbe12ac2c83c5547c83f746c/tensorflow/core/ops/math_grad.cc#L473
                              +opGrad "Maximum" _ [toT -> x, toT -> y] [dz] =
                              +    gradForBinaryCwise (x, gx) (y, gy)
                              +  where
                              +    xmask = CoreOps.greaterEqual x y
                              +    gx = CoreOps.select xmask dz (CoreOps.zerosLike dz)
                              +    gy = CoreOps.select (CoreOps.logicalNot xmask) dz (CoreOps.zerosLike dz)
                              +
                              +opGrad "Sum" _ [toT -> x, toT -> indices] [dz] =
                              +    [ Just $ CoreOps.tile grad tileScaling, Nothing ]
                              +  where
                              +    -- TODO(gnezdo): Implement the fast-path from math_grad._SumGrad.
                              +    sx = shape (x :: Tensor Build a)
                              +    outputShapeKeptDims = reducedShape sx (indices :: Tensor Build Int32)
                              +    tileScaling = safeShapeDiv sx outputShapeKeptDims
                              +    grad = reshape dz outputShapeKeptDims
                              +
                              +opGrad "Mean" u v@[toT -> x, _] w =
                              +    [Just $ dz `CoreOps.div` CoreOps.cast factor, Nothing]
                              +  where
                              +    [Just dz, Nothing] = opGrad "Sum" u v w
                              +    inputShape = shape (x :: Tensor Build a)
                              +    outputShape = shape (dz :: Tensor Build a)
                              +    -- TODO(fmayle): Add fast path when shape is known.
                              +    inputSize = CoreOps.prod inputShape $ rangeOfRank inputShape
                              +    outputSize = CoreOps.prod outputShape $ rangeOfRank outputShape
                              +    factor = safeShapeDiv inputSize outputSize
                              +
                              +opGrad "Add" _ [toT -> x, toT -> y] [dz] =
                              +    [ Just $ reshape (sum dz rx) sx
                              +    , Just $ reshape (sum dz ry) sy ]
                              +  where
                              +    sx = shape (x :: Tensor Build a)
                              +    sy = shape (y :: Tensor Build a)
                              +    (rx, ry) = broadcastGradientArgs sx sy
                              +
                              +-- Copies the gradients to all inputs
                              +-- Not broadcasting
                              +opGrad "AddN" _ inputs [dz] =
                              +    map ((const . Just . expr) dz) inputs
                              +
                              +opGrad "Sub" u v w =
                              +    [Just x, Just (-y)]
                              +  where
                              +    [Just x, Just y] = opGrad "Add" u v w
                              +
                              +opGrad "SoftmaxCrossEntropyWithLogits" _ [toT -> x, toT -> y] [dz, _] =
                              +    [ Just $ expandDims dz (-1) * snd (softmaxCrossEntropyWithLogits x y)
                              +    , Nothing ]
                              +
                              +opGrad "Mul" _ [toT -> x, toT -> y] [dz] =
                              +    -- TODO(fmayle): Handle complex numbers.
                              +    [ Just $ reshape (sum (dz `CoreOps.mul` y) rx) sx
                              +    , Just $ reshape (sum (x `CoreOps.mul` dz) ry) sy ]
                              +  where
                              +    sx = shape (x :: Tensor Build a)
                              +    sy = shape (y :: Tensor Build a)
                              +    (rx, ry) = broadcastGradientArgs sx sy
                              +
                              +opGrad "Div" _ [toT -> x, toT -> y] [dz] =
                              +    -- TODO(fmayle): Handle complex numbers.
                              +    -- TODO(gnezdo): Provide Fractional instance and use '/' instead of div.
                              +    [ Just $ reshape (sum (dz `CoreOps.div` y) rx) sx
                              +    , Just $ reshape (sum (dz `CoreOps.mul` (negate x `CoreOps.div` (y * y)))
                              +                         ry)
                              +                sy
                              +    ]
                              +  where
                              +    sx = shape (x :: Tensor Build a)
                              +    sy = shape (y :: Tensor Build a)
                              +    (rx, ry) = broadcastGradientArgs sx sy
                              +
                              +opGrad "MatMul" nodeDef [toT -> x, toT -> y] [dz] =
                              +    let transposeA = lookupAttr nodeDef "transpose_a"
                              +        transposeB = lookupAttr nodeDef "transpose_b"
                              +        transAttrs a b =
                              +            (opAttr "transpose_a" .~ a) . (opAttr "transpose_b" .~ b)
                              +    in case (transposeA, transposeB) of
                              +       (False, False) ->
                              +           [ Just $ matMul' (transAttrs False True) dz y
                              +           , Just $ matMul' (transAttrs True False) x dz]
                              +       (False, True) ->
                              +           [ Just $ matMul dz y
                              +           , Just $ matMul' (transAttrs True False) dz x]
                              +       (True, False) ->
                              +           [ Just $ matMul' (transAttrs False True) y dz
                              +           , Just $ matMul x dz]
                              +       (True, True) ->
                              +           [ Just $ matMul' (transAttrs True True) y dz
                              +           , Just $ matMul' (transAttrs True True) dz x]
                              +
                              +opGrad "Transpose" _ [_, toT -> p] [dz] =
                              +    [ Just $ CoreOps.transpose dz
                              +            (CoreOps.invertPermutation p :: Tensor Build Int32)
                              +    , Nothing
                              +    ]
                              +
                              +opGrad "Conv2D" nodeDef [toT -> x, toT -> y] [dz] =
                              +    [ Just $ CoreOps.conv2DBackpropInput'
                              +                ((opAttr "strides" .~ strides)
                              +                    . (opAttr "padding" .~ padding)
                              +                    . (opAttr "use_cudnn_on_gpu" .~ useCudnnOnGpu)
                              +                    . (opAttr "data_format" .~ dataFormat))
                              +                (shape x) y dz
                              +    , Just $ CoreOps.conv2DBackpropFilter'
                              +                ((opAttr "strides" .~ strides)
                              +                    . (opAttr "padding" .~ padding)
                              +                    . (opAttr "use_cudnn_on_gpu" .~ useCudnnOnGpu)
                              +                    . (opAttr "data_format" .~ dataFormat))
                              +                x (shape y) dz
                              +    ]
                              +  where
                              +    strides = lookupAttr nodeDef "strides" :: [Int64]
                              +    padding = lookupAttr nodeDef "padding" :: ByteString
                              +    useCudnnOnGpu = lookupAttr nodeDef "use_cudnn_on_gpu" :: Bool
                              +    dataFormat = lookupAttr nodeDef "data_format" :: ByteString
                              +
                              +opGrad "Conv2DBackpropInput" nodeDef [_, toT -> x, toT -> y] [dz] =
                              +    [ Nothing
                              +    , Just $ CoreOps.conv2DBackpropFilter'
                              +                ((opAttr "strides" .~ strides)
                              +                    . (opAttr "padding" .~ padding)
                              +                    . (opAttr "use_cudnn_on_gpu" .~ useCudnnOnGpu)
                              +                    . (opAttr "data_format" .~ dataFormat))
                              +                dz (shape x) y
                              +    , Just $ CoreOps.conv2D'
                              +                ((opAttr "strides" .~ strides)
                              +                    . (opAttr "padding" .~ padding)
                              +                    . (opAttr "use_cudnn_on_gpu" .~ useCudnnOnGpu)
                              +                    . (opAttr "data_format" .~ dataFormat))
                              +                dz x
                              +    ]
                              +  where
                              +    strides = lookupAttr nodeDef "strides" :: [Int64]
                              +    padding = lookupAttr nodeDef "padding" :: ByteString
                              +    useCudnnOnGpu = lookupAttr nodeDef "use_cudnn_on_gpu" :: Bool
                              +    dataFormat = lookupAttr nodeDef "data_format" :: ByteString
                              +
                              +opGrad "MaxPool" nodeDef [toT -> x] [dz] =
                              +    [ Just $ CoreOps.maxPoolGrad'
                              +                ((opAttr "ksize" .~ ksize)
                              +                    . (opAttr "strides" .~ strides)
                              +                    . (opAttr "padding" .~ padding)
                              +                    . (opAttr "data_format" .~ dataFormat))
                              +                x output dz
                              +    ]
                              +  where
                              +    output :: Tensor Build a
                              +    output = toT $ Output 0 (nodeDefName nodeDef)
                              +    ksize = lookupAttr nodeDef "ksize" :: [Int64]
                              +    strides = lookupAttr nodeDef "strides" :: [Int64]
                              +    padding = lookupAttr nodeDef "padding" :: ByteString
                              +    dataFormat = lookupAttr nodeDef "data_format" :: ByteString
                              +
                              +opGrad "Reshape" _ [toT -> x, _] [dz] =
                              +    [Just $ reshape dz $ shape (x :: Tensor Build a), Nothing]
                              +
                              +opGrad "OneHot" _ _ _ = [Nothing, Nothing, Nothing, Nothing]
                              +opGrad "TruncatedNormal" _ _ _ = [Nothing]
                              +
                              +opGrad "RefIdentity" _ _ [dz] = [Just $ expr dz]
                              +opGrad "Cast" nodeDef _ [dz] = [Just reverseCast]
                              +  where
                              +    -- TODO(gnezdo): too permissive, python only allows float types as src_type.
                              +    reverseCast =
                              +        pureOp [] $ pure (opDef "Cast"
                              +                 & opAttr "DstT" .~ (lookupAttr nodeDef "SrcT" :: ByteString)
                              +                 & opAttr "SrcT" .~ (lookupAttr nodeDef "DstT" :: ByteString)
                              +                 & opInputs .~ [renderedOutput dz])
                              +
                              +opGrad "DynamicStitch" nodeDef inputs [dz] =
                              +    replicate halfLen Nothing ++ valuesGrads
                              +  where
                              +    halfLen =
                              +        let len = length inputs
                              +            half = len `div` 2
                              +        in if 2 * half == len
                              +           then half
                              +           else error ("Uneven input size " ++ show (len, showMessage nodeDef))
                              +    valuesGrads = [ Just $ CoreOps.gather dz (toT idx :: Tensor Build Int32)
                              +                  | idx <- take halfLen inputs
                              +                  ]
                              +
                              +opGrad "DynamicPartition" nodeDef [toT -> xs, toT -> indices] dz =
                              +    [ Just reconstructed, Nothing ]
                              +  where
                              +    reconstructed = CoreOps.reshape stitched
                              +                    (CoreOps.shape (xs :: Tensor Build a) :: Tensor Build Int32)
                              +    stitched = CoreOps.dynamicStitch partitionedIndices dz
                              +    partitionedIndices = CoreOps.dynamicPartition np originalIndices indices
                              +    np = lookupAttr nodeDef "num_partitions" :: Int64
                              +    originalIndices =
                              +        CoreOps.reshape (CoreOps.range 0 (CoreOps.size indices) 1) prefixShape
                              +    prefixShape = shapeInt32 indices
                              +    shapeInt32 t = CoreOps.shape t :: Tensor Build Int32
                              +
                              +opGrad "Select" _ [toT -> c, toT -> x, _] [dz] =
                              +    [ Nothing
                              +    , Just $ CoreOps.select c dz zeros
                              +    , Just $ CoreOps.select c zeros dz
                              +    ]
                              +  where zeros = CoreOps.zerosLike x
                              +
                              +-- TODO(gnezdo): Unlike Python, no control dependency on dz.
                              +opGrad "Log" _ [toT -> x] [dz] = [ Just $ dz `CoreOps.mul` CoreOps.inv x ]
                              +-- TODO(gnezdo): Reuse the output instead of doing another exp,
                              +-- though, it is probably CSE'd away anyway.
                              +opGrad "Exp" _ [toT -> x] [dz] = [ Just $ dz `CoreOps.mul` CoreOps.exp x ]
                              +opGrad "SparseSegmentSum" _ [toT -> x, toT -> y, toT -> t] [dz] =
                              +    [ Just $ CoreOps.unsortedSegmentSum
                              +             (CoreOps.gather dz (t :: Tensor Build Int32))
                              +             (y :: Tensor Build Int32) inputRows
                              +    , Nothing
                              +    , Nothing
                              +    ]
                              +  where inputRows = flatSlice (shape (x :: Tensor Build a)) 0 1
                              +
                              +opGrad "LabelClasses" _ _ _ = [Nothing, Nothing]
                              +opGrad "LabelWeights" _ _ _ = [Nothing]
                              +opGrad "Size" _ _ _ = [Nothing]
                              +
                              +-- TODO (jcberentsen): Python implementation uses set_shape for
                              +-- static shape inference, which is unsupported.
                              +-- TODO: implement support for static shape inference
                              +opGrad "Tile" _ [toT -> x, toT -> multiples] [dz] =
                              +    [Just inputGrad, Nothing]
                              +  where
                              +    inputGrad = sum reshapedDz axes
                              +    inputShape = shape (x :: Tensor Build a)
                              +    packed = CoreOps.pack [multiples, inputShape]
                              +    perm = vector [1, 0 :: Int32]
                              +    splitShape = CoreOps.reshape (CoreOps.transpose packed perm) allDimensions
                              +    axes = CoreOps.range 0 (CoreOps.size splitShape) (2 :: Tensor Build Int32)
                              +    reshapedDz = CoreOps.reshape dz splitShape
                              +
                              +opGrad "ZerosLike" _ _ _ = [Nothing]
                              +opGrad "Fill" _ _ [dz] = [Nothing, Just $ sum dz rx]
                              +  where
                              +    rx = rangeOfRank dz
                              +
                              +-- Treat read ops as an identity function on the variable. This allows us to
                              +-- take gradients w.r.t. to the variable handle instead of the result of a read
                              +-- op. If a variable is read multiple times, the gradients will propagate back
                              +-- through each read.
                              +opGrad "ReadVariableOp" _ _ [dz] = [Just $ expr dz]
                              +
                              +-- TODO(fmayle): These can go away if we properly prune the graph.
                              +opGrad "Const" _ _ _ = [Nothing, Nothing]
                              +opGrad "Placeholder" _ _ _ = []
                              +opGrad "VarHandleOp" _ _ _ = []
                              +opGrad "Variable" _ _ _ = []
                              +
                              +opGrad n nodeDef ins grads =
                              +    error $ "no gradient implemented for " ++
                              +            show (n, length ins, length grads, showMessage nodeDef, ins)
                              +
                              +-- | The number of outputs for an op type.
                              +numOutputs :: NodeDef -> OutputIx
                              +numOutputs o =
                              +    case o ^. op of
                              +        "Abs" -> 1
                              +        "Add" -> 1
                              +        "AddN" -> 1
                              +        "Cast" -> 1
                              +        "Const" -> 1
                              +        "Concat" -> 1
                              +        "Conv2D" -> 1
                              +        "Conv2DBackpropInput" -> 1
                              +        "Div" -> 1
                              +        "DynamicStitch" -> 1
                              +        "DynamicPartition" ->
                              +            fromIntegral (lookupAttr o "num_partitions" :: Int64)
                              +        "Exp" -> 1
                              +        "Gather" -> 1
                              +        "LabelClasses" -> 1
                              +        "LabelWeights" -> 1
                              +        "Log" -> 1
                              +        "MatMul" -> 1
                              +        "Max" -> 1
                              +        "Maximum" -> 1
                              +        "MaxPool" -> 1
                              +        "Mean" -> 1
                              +        "Min" -> 1
                              +        "Mul" -> 1
                              +        "Neg" -> 1
                              +        "Placeholder" -> 1
                              +        "OneHot" -> 1
                              +        "ReadVariableOp" -> 1
                              +        "RefIdentity" -> 1
                              +        "Relu" -> 1
                              +        "ReluGrad" -> 1
                              +        "Reshape" -> 1
                              +        "Select" -> 1
                              +        "Size" -> 1
                              +        "SoftmaxCrossEntropyWithLogits" -> 2
                              +        "Square" -> 1
                              +        "SparseSegmentSum" -> 1
                              +        "Sub" -> 1
                              +        "Sum" -> 1
                              +        "Tile" -> 1
                              +        "Transpose" -> 1
                              +        "TruncatedNormal" -> 1
                              +        "VarHandleOp" -> 1
                              +        "Variable" -> 1
                              +        "ZerosLike" -> 1
                              +        "Fill" -> 1
                              +        _ -> error $ "numOutputs not implemented for " ++ show (o ^. op)
                              +
                              +-- Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`
                              +safeShapeDiv :: Tensor v1 Int32 -> Tensor v2 Int32 -> Tensor Build Int32
                              +safeShapeDiv x y = x `CoreOps.div` (CoreOps.maximum y 1)
                              +
                              +allDimensions :: Tensor Build Int32
                              +allDimensions = vector [-1 :: Int32]
                              +
                              +rangeOfRank :: forall v1 t. TensorType t => Tensor v1 t -> Tensor Build Int32
                              +rangeOfRank x = CoreOps.range 0 (CoreOps.rank x) 1
                              +
                              +lookupAttr ::  Attribute a1 => NodeDef -> Text -> a1
                              +lookupAttr nodeDef attrName = nodeDef ^. attr . at attrName . non def . attrLens
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Minimize.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Minimize.html new file mode 100644 index 0000000..2bf1ab0 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Minimize.html @@ -0,0 +1,116 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +{-# LANGUAGE FlexibleContexts #-}
                              +{-# LANGUAGE OverloadedStrings #-}
                              +{-# LANGUAGE RankNTypes #-}
                              +{-# LANGUAGE ScopedTypeVariables #-}
                              +{-# LANGUAGE TypeApplications #-}
                              +
                              +module TensorFlow.Minimize
                              +    ( Minimizer
                              +    , minimizeWith
                              +    , gradientDescent
                              +    , AdamConfig(..)
                              +    , adam
                              +    , adam'
                              +    ) where
                              +
                              +import Control.Monad (zipWithM)
                              +import Data.Default (Default(..))
                              +import Data.List (zipWith4)
                              +import Data.Maybe (fromMaybe)
                              +
                              +import qualified TensorFlow.Core as TF
                              +import qualified TensorFlow.Gradient as TF
                              +import qualified TensorFlow.Ops as TF hiding (assign, initializedVariable)
                              +import qualified TensorFlow.Variable as TF
                              +
                              +-- | Functions that minimize a loss w.r.t. a set of 'TF.Variable's.
                              +--
                              +-- Generally only performs one step of an iterative algorithm.
                              +--
                              +-- 'Minimizer's are defined as a function of the gradients instead of
                              +-- the loss so that users can apply transformations to the gradients.
                              +type Minimizer a =
                              +    forall m. TF.MonadBuild m =>
                              +    [TF.Variable a] -> [TF.Tensor TF.Value a] -> m TF.ControlNode
                              +
                              +-- | Convenience wrapper around 'TF.gradients' and a 'Minimizer'.
                              +minimizeWith :: (TF.MonadBuild m, TF.GradientCompatible a)
                              +             => Minimizer a
                              +             -> TF.Tensor v a    -- ^ Loss.
                              +             -> [TF.Variable a]  -- ^ Parameters of the loss function.
                              +             -> m TF.ControlNode
                              +minimizeWith minimizer loss params =
                              +    TF.gradients loss params >>= minimizer params
                              +
                              +-- | Perform one step of the gradient descent algorithm.
                              +gradientDescent :: TF.GradientCompatible a
                              +                => a  -- ^ Learning rate.
                              +                -> Minimizer a
                              +gradientDescent learningRate params grads = TF.withNameScope "gradientDescent" $ do
                              +    let applyGrad param grad =
                              +            TF.assignAdd param (TF.scalar (-learningRate) `TF.mul` grad)
                              +    TF.group =<< zipWithM applyGrad params grads
                              +
                              +-- TODO: Support more than Float in adam.
                              +
                              +data AdamConfig = AdamConfig
                              +    { adamLearningRate :: Float
                              +    , adamBeta1        :: Float
                              +    , adamBeta2        :: Float
                              +    , adamEpsilon      :: Float
                              +    }
                              +
                              +instance Default AdamConfig where
                              +  -- Recommended defaults from the adam paper.
                              +  def = AdamConfig 0.001 0.9 0.999 1e-8
                              +
                              +-- | Perform one step of the adam algorithm.
                              +--
                              +-- See https://arxiv.org/abs/1412.6980.
                              +--
                              +-- NOTE: Currently requires all 'TF.Variable's to have an 'TF.initializedValue'.
                              +adam :: Minimizer Float
                              +adam = adam' def
                              +
                              +adam' :: AdamConfig -> Minimizer Float
                              +adam' config params grads = TF.withNameScope "adam" $ do
                              +    let lr = TF.scalar (adamLearningRate config)
                              +        beta1 = TF.scalar (adamBeta1 config)
                              +        beta2 = TF.scalar (adamBeta2 config)
                              +        epsilon = TF.scalar (adamEpsilon config)
                              +    -- Create adam state variables.
                              +    let errorMsg = "TensorFlow.Minimize.adam requires an initial value for all variables"
                              +        initVal = fromMaybe (error errorMsg) . TF.initializedValue
                              +    ms <- mapM (TF.initializedVariable . TF.zerosLike . initVal) params
                              +    vs <- mapM (TF.initializedVariable . TF.zerosLike . initVal) params
                              +    beta1Power <- TF.initializedVariable beta1
                              +    beta2Power <- TF.initializedVariable beta2
                              +    -- Perform adam update.
                              +    let applyGrad param m v =
                              +            TF.resourceApplyAdam param m v
                              +                                 (TF.readValue beta1Power)
                              +                                 (TF.readValue beta2Power)
                              +                                 lr beta1 beta2 epsilon
                              +    updateVars <- sequence $ zipWith4 applyGrad params ms vs grads
                              +    -- Update beta variables after adam update.
                              +    let updateBeta betaPower beta =
                              +            TF.withControlDependencies updateVars
                              +                (TF.assign betaPower (TF.readValue betaPower `TF.mul` beta))
                              +    updateBeta1 <- updateBeta beta1Power beta1
                              +    updateBeta2 <- updateBeta beta2Power beta2
                              +    TF.group (updateBeta1:updateBeta2:updateVars)
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.NN.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.NN.html new file mode 100644 index 0000000..3a71384 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.NN.html @@ -0,0 +1,89 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +{-# LANGUAGE DataKinds #-}
                              +{-# LANGUAGE FlexibleContexts #-}
                              +{-# LANGUAGE OverloadedStrings #-}
                              +
                              +module TensorFlow.NN
                              +    ( sigmoidCrossEntropyWithLogits
                              +    ) where
                              +
                              +import Prelude hiding           ( log
                              +                                , exp
                              +                                )
                              +import TensorFlow.Build         ( MonadBuild
                              +                                , withNameScope
                              +                                )
                              +import TensorFlow.GenOps.Core   ( greaterEqual
                              +                                , select
                              +                                , log
                              +                                , exp
                              +                                )
                              +import TensorFlow.Tensor        ( Tensor(..)
                              +                                , render
                              +                                , Value
                              +                                )
                              +import TensorFlow.Types         ( TensorType(..)
                              +                                , OneOf
                              +                                )
                              +import TensorFlow.Ops           ( zerosLike
                              +                                , add
                              +                                , mul
                              +                                , neg
                              +                                )
                              +
                              +-- | Computes sigmoid cross entropy given `logits`.
                              +--
                              +-- Measures the probability error in discrete classification tasks in which each
                              +-- class is independent and not mutually exclusive.  For instance, one could
                              +-- perform multilabel classification where a picture can contain both an elephant
                              +-- and a dog at the same time.
                              +--
                              +-- For brevity, let `x = logits`, `z = targets`.  The logistic loss is
                              +--
                              +--        z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
                              +--      = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
                              +--      = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+--      = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
                              +--      = (1 - z) * x + log(1 + exp(-x))
                              +--      = x - x * z + log(1 + exp(-x))
                              +--
                              +--  For x < 0, to avoid overflow in exp(-x), we reformulate the above
                              +--
                              +--        x - x * z + log(1 + exp(-x))
                              +--      = log(exp(x)) - x * z + log(1 + exp(-x))
                              +--      = - x * z + log(1 + exp(x))
                              +--
                              +--  Hence, to ensure stability and avoid overflow, the implementation uses this
                              +--  equivalent formulation
                              +--
                              +--      max(x, 0) - x * z + log(1 + exp(-abs(x)))
                              +--
                              +--  `logits` and `targets` must have the same type and shape.
                              +sigmoidCrossEntropyWithLogits
                              +  :: (MonadBuild m, OneOf '[Float, Double] a, TensorType a, Num a)
                              +     => Tensor Value a          -- ^ __logits__
                              +     -> Tensor Value a          -- ^ __targets__
                              +     -> m (Tensor Value a)
                              +sigmoidCrossEntropyWithLogits logits targets = do
                              +    let zeros = zerosLike logits
                              +        cond = logits `greaterEqual` zeros
                              +        relu_logits = select cond logits zeros
                              +        neg_abs_logits = select cond (neg logits) logits
                              +    withNameScope "logistic_loss" $ do
                              +        left <- render $ relu_logits - logits `mul` targets
                              +        right <- render $ log (1 + exp neg_abs_logits)
                              +        withNameScope "sigmoid_add" $ render $ left `add` right
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Ops.html new file mode 100644 index 0000000..86aa901 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Ops.html @@ -0,0 +1,409 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +-- | This module contains definitions for some built-in TensorFlow operations.
                              +--
                              +-- Note that certain, "stateful" ops like 'variable' and 'assign' return a
+-- 'Build' action (e.g., @Build (Tensor Ref a)@) instead of a pure value; the
                              +-- returned 'Tensor's are always rendered in the current 'Build' context.  This
                              +-- approach helps us avoid problems with inlining or common subexpression
                              +-- elimination, by writing
                              +--
                              +-- > do
                              +-- >     v <- variable []
                              +-- >     w <- assign v 3
                              +-- >     render $ w * w
                              +--
                              +-- instead of
                              +--
                              +-- > let
                              +-- >    v = variable []
                              +-- >    w = assign v 3
                              +-- > in w * w
                              +--
                              +-- since the latter could be reasonably transformed by the compiler into (or
                              +-- vice versa)
                              +--
                              +-- > let
                              +-- >    v = variable []
                              +-- >    w = assign v 3
                              +-- >    w' = assign v 3
                              +-- > in w * w'
                              +--
                              +-- Ops should return a 'Build' action if their original 'OpDef' marks them as
                              +-- stateful, or if they take any Refs as input.  (This mirrors the rules that
                              +-- TensorFlow uses to avoid common subexpression elimination.)
                              +{-# LANGUAGE ConstraintKinds #-}
                              +{-# LANGUAGE DataKinds #-}
                              +{-# LANGUAGE FlexibleContexts #-}
                              +{-# LANGUAGE FlexibleInstances #-}
                              +{-# LANGUAGE OverloadedLists #-}
                              +{-# LANGUAGE OverloadedStrings #-}
                              +{-# LANGUAGE RankNTypes #-}
                              +{-# LANGUAGE ScopedTypeVariables #-}
                              +{-# LANGUAGE TypeFamilies #-}
                              +{-# LANGUAGE UndecidableInstances #-}
                              +{-# OPTIONS_GHC -fno-warn-orphans #-}
                              +
                              +module TensorFlow.Ops
                              +    ( CoreOps.add
                              +    , CoreOps.add'
                              +    , CoreOps.abs
                              +    , CoreOps.abs'
                              +    , CoreOps.addN
                              +    , CoreOps.addN'
                              +    , CoreOps.argMax
                              +    , CoreOps.argMax'
                              +    , CoreOps.assign
                              +    , CoreOps.assign'
                              +    , CoreOps.broadcastGradientArgs
                              +    , CoreOps.broadcastGradientArgs'
                              +    , CoreOps.cast
                              +    , CoreOps.cast'
                              +    , CoreOps.concat
                              +    , CoreOps.concat'
                              +    , constant
                              +    , constant'
                              +    , CoreOps.equal
                              +    , CoreOps.equal'
                              +    , expandDims
                              +    , expandDims'
                              +    , initializedVariable
                              +    , initializedVariable'
                              +    , zeroInitializedVariable
                              +    , zeroInitializedVariable'
                              +    , CoreOps.fill
                              +    , CoreOps.fill'
                              +    , CoreOps.identity
                              +    , CoreOps.identity'
                              +    , CoreOps.matMul
                              +    , CoreOps.matMul'
                              +    , matTranspose
                              +    , matTranspose'
                              +    , CoreOps.mean
                              +    , CoreOps.mean'
                              +    , CoreOps.mul
                              +    , CoreOps.mul'
                              +    , CoreOps.neg
                              +    , CoreOps.neg'
                              +    , CoreOps.oneHot
                              +    , CoreOps.oneHot'
                              +    , CoreOps.pack
                              +    , CoreOps.pack'
                              +    , placeholder
                              +    , placeholder'
                              +    , CoreOps.range
                              +    , CoreOps.range'
                              +    , reducedShape
                              +    , reduceMean
                              +    , reduceMean'
                              +    , CoreOps.relu
                              +    , CoreOps.relu'
                              +    , CoreOps.reluGrad
                              +    , CoreOps.reluGrad'
                              +    , CoreOps.reshape
                              +    , CoreOps.reshape'
                              +    , restore
                              +    , restoreFromName
                              +    , save
                              +    , scalar
                              +    , scalar'
                              +    , shape
                              +    , shape'
                              +    , CoreOps.sign
                              +    , CoreOps.sign'
                              +    , CoreOps.size
                              +    , CoreOps.size'
                              +    , CoreOps.softmax
                              +    , CoreOps.softmax'
                              +    , CoreOps.softmaxCrossEntropyWithLogits
                              +    , CoreOps.softmaxCrossEntropyWithLogits'
                              +    , CoreOps.sparseToDense
                              +    , CoreOps.sparseToDense'
                              +    , CoreOps.sub
                              +    , CoreOps.sub'
                              +    , CoreOps.sum
                              +    , CoreOps.sum'
                              +    , reduceSum
                              +    , reduceSum'
                              +    , CoreOps.transpose
                              +    , CoreOps.transpose'
                              +    , truncatedNormal
                              +    , truncatedNormal'
                              +    , CoreOps.variable
                              +    , CoreOps.variable'
                              +    , vector
                              +    , vector'
                              +    , zeros
                              +    , CoreOps.zerosLike
                              +    , CoreOps.zerosLike'
                              +    , scalarize
                              +    ) where
                              +
                              +import Data.ByteString (ByteString)
                              +import Data.Complex (Complex)
                              +import Data.Int (Int32, Int64)
                              +import Data.Word (Word16)
                              +import Prelude hiding (abs, sum, concat)
                              +import Data.ProtoLens (def)
                              +import Data.Text.Encoding (encodeUtf8)
                              +import Lens.Family2 ((.~), (&))
                              +import Text.Printf (printf)
                              +import Proto.Tensorflow.Core.Framework.Tensor
                              +    ( TensorProto
                              +    , dtype
                              +    , tensorShape
                              +    )
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +  as TensorShape
                              +import TensorFlow.Build
                              +import TensorFlow.BuildOp
                              +import TensorFlow.ControlFlow (group)
                              +import TensorFlow.Tensor
                              +import TensorFlow.Types
                              +
                              +import qualified TensorFlow.GenOps.Core as CoreOps
                              +
                              +import qualified Prelude (abs)
                              +
                              +-- TODO: Look into hs-boot refactoring to allow mutually recursive imports.
                              +-- | Must be defined as an orphan because of the dependency order between Ops
                              +-- and Tensor.
                              +--
                              +-- The indirect constraint "v ~ Value" helps disambiguate types, for example in
                              +-- "neg 1 :: Tensor Value Float", it helps find the type of the subexpression
                              +-- "1".
                              +instance ( TensorType a
                              +         , Num a
                              +         , v ~ Build
                              +         , OneOf '[ Double, Float, Int32, Int64
                              +                  , Complex Float, Complex Double] a) => Num (Tensor v a) where
                              +    (+) = CoreOps.add
                              +    (*) = CoreOps.mul
                              +    (-) = CoreOps.sub
                              +    abs = CoreOps.abs
                              +    fromInteger = scalar . fromInteger
                              +    signum = CoreOps.sign
                              +    negate = CoreOps.neg
                              +
                              +matTranspose :: TensorType a => Tensor e a -> Tensor Build a
                              +matTranspose = matTranspose' id
                              +
                              +matTranspose' :: TensorType a => OpParams -> Tensor v a -> Tensor Build a
                              +matTranspose' params = flip (CoreOps.transpose' params) (vector [1, 0 :: Int32])
                              +
                              +placeholder :: (MonadBuild m, TensorType a) => Shape -> m (Tensor Value a)
                              +placeholder = placeholder' id
                              +
                              +placeholder' :: forall m a . (MonadBuild m, TensorType a)
                              +             => OpParams -> Shape -> m (Tensor Value a)
                              +placeholder' params pShape
                              +    -- Note: we don't use CoreOps.placeholder' since that op isn't stateful,
                              +    -- and thus would be CSE'd.
                              +    = build $ buildOp [] $ opDef "Placeholder"
                              +                & opAttr "dtype" .~ tensorType (undefined :: a)
                              +                & opAttr "shape" .~ pShape
                              +                & params
                              +
                              +-- | Creates a variable initialized to the given value.
                              +-- Initialization happens next time session runs.
                              +initializedVariable :: (MonadBuild m, TensorType a)
                              +                    => Tensor v a -> m (Tensor Ref a)
                              +initializedVariable = initializedVariable' id
                              +
                              +initializedVariable' :: (MonadBuild m, TensorType a)
                              +                    => OpParams -> Tensor v a -> m (Tensor Ref a)
                              +initializedVariable' params initializer = do
                              +    v <- CoreOps.variable' params []  -- The shape is not known initially.
                              +    i <- CoreOps.assign' (opAttr "validate_shape" .~ False) v
                              +                            initializer
                              +    addInitializer =<< group i
                              +    return v
                              +
                              +-- | Creates a zero-initialized variable with the given shape.
                              +zeroInitializedVariable
                              +  :: (MonadBuild m, TensorType a, Num a) =>
                              +     TensorFlow.Types.Shape -> m (Tensor TensorFlow.Tensor.Ref a)
                              +zeroInitializedVariable = zeroInitializedVariable' id
                              +
                              +zeroInitializedVariable'
                              +  :: (MonadBuild m, TensorType a, Num a) =>
                              +     OpParams -> TensorFlow.Types.Shape -> m (Tensor TensorFlow.Tensor.Ref a)
                              +zeroInitializedVariable' params = initializedVariable' params . zeros
                              +
                              +-- TODO: Support heterogeneous list of tensors.
                              +save :: forall a m v . (Rendered (Tensor v), MonadBuild m, TensorType a)
                              +        => ByteString    -- ^ File path.
                              +        -> [Tensor v a]  -- ^ Tensors to save.
                              +        -> m ControlNode
                              +save path xs = build $ do
                              +    let toByteStringTensor = scalar . encodeUtf8 . encodeOutput . renderedOutput
                              +    let names = fmap toByteStringTensor xs
                              +    let types = replicate (length xs) (tensorType (undefined :: a))
                              +    names' <- buildInputs $ CoreOps.pack names
                              +    xs' <- buildInputs xs
                              +    path' <- buildInputs $ scalar path
                              +    buildOp [] $ opDef "Save"
                              +                    & opAttr "T" .~ types
                              +                    & opInputs .~ (path' ++ names' ++ xs')
                              +
                              +-- | Restore a tensor's value from a checkpoint file.
                              +--
                              +-- This version allows restoring from a checkpoint file that uses a different
                              +-- tensor name than the variable.
                              +restoreFromName :: forall a m . (MonadBuild m, TensorType a)
                              +                => ByteString    -- ^ File path.
                              +                -> ByteString    -- ^ Tensor name override.
                              +                -> Tensor Ref a  -- ^ Tensor to restore.
                              +                -> m ControlNode
                              +restoreFromName path name x = build $ do
                              +    path' <- buildInputs $ scalar path
                              +    name' <- buildInputs $ scalar name
                              +    restoreOp <- buildOp [] $ opDef "Restore"
                              +                               & opAttr "dt" .~ tensorType (undefined :: a)
                              +                               & opInputs .~ (path' ++ name')
                              +    group =<< CoreOps.assign x (restoreOp :: Tensor Value a)
                              +
                              +-- | Restore a tensor's value from a checkpoint file.
                              +restore :: forall a m . (MonadBuild m, TensorType a)
                              +        => ByteString    -- ^ File path.
                              +        -> Tensor Ref a  -- ^ Tensor to restore.
                              +        -> m ControlNode
                              +restore path x = restoreFromName path name x
                              +  where
                              +    name = encodeUtf8 $ encodeOutput $ renderedOutput x
                              +
                              +-- | Create a constant tensor.
                              +--
                              +-- The values should be in row major order, e.g.,
                              +--
                              +--   element 0:   index (0, ..., 0)
                              +--   element 1:   index (0, ..., 1)
                              +--   ...
                              +constant :: TensorType a => Shape -> [a] -> Tensor Build a
                              +constant = constant' id
                              +
                              +constant' :: forall a . TensorType a => OpParams -> Shape -> [a] -> Tensor Build a
                              +constant' params (Shape cShape) values
                              +    | invalidLength = error invalidLengthMsg
                              +    | otherwise = CoreOps.const' (params . (opAttr "value" .~ typedNode))
                              +  where
                              +    invalidLength = product cShape /= fromIntegral (length values)
                              +    invalidLengthMsg = printf "invalid tensor length: expected %d got %d"
                              +                              (product cShape)
                              +                              (length values)
                              +    typedNode :: TensorProto
                              +    typedNode = def
                              +                & dtype .~ tensorType (undefined :: a)
                              +                & tensorShape.TensorShape.dim .~
                              +                      [def & TensorShape.size .~ x | x <- cShape]
                              +                & tensorVal .~ values
                              +
                              +-- | Reshape a N-D tensor down to a scalar.
                              +--
                              +-- See `TensorFlow.GenOps.Core.reshape`.
                              +scalarize :: TensorType a => Tensor v a -> Tensor Build a
                              +scalarize t = CoreOps.reshape t (vector scalarShape)
                              +    where
                              +        scalarShape = [] :: [Int32]
                              +
                              +-- | Sum a tensor down to a scalar
+-- See `TensorFlow.GenOps.Core.sum`
                              +reduceSum :: (OneOf '[ Double, Float, Int32, Int64
                              +                     , Complex Float, Complex Double] a) =>
                              +             Tensor v a -> Tensor Build a
                              +reduceSum x = CoreOps.sum x allAxes
                              +  where allAxes = CoreOps.range 0 (CoreOps.rank x :: Tensor Build Int32) 1
                              +
                              +reduceSum' :: (OneOf '[ Double, Float, Int32, Int64
                              +                      , Complex Float, Complex Double] a) =>
                              +              OpParams -> Tensor v a -> Tensor Build a
                              +reduceSum' params x = CoreOps.sum' params x allAxes
                              +  where allAxes = CoreOps.range 0 (CoreOps.rank x :: Tensor Build Int32) 1
                              +
                              +-- | Computes the mean of elements across dimensions of a tensor.
                              +-- See `TensorFlow.GenOps.Core.mean`
                              +reduceMean
                              +  :: ( TensorType a
                              +     , OneOf '[ Double, Float, Complex Float, Complex Double] a
                              +     )
                              +  => Tensor v a -> Tensor Build a
                              +reduceMean = reduceMean' id
                              +
                              +reduceMean'
                              +  :: ( TensorType a
                              +     , OneOf '[ Double, Float, Complex Float, Complex Double] a
                              +     )
                              +  => OpParams -> Tensor v a -> Tensor Build a
                              +reduceMean' params x = CoreOps.mean' params x allAxes
                              +  where allAxes = CoreOps.range 0 (CoreOps.rank x :: Tensor Build Int32) 1
                              +
                              +-- | Create a constant vector.
                              +vector :: TensorType a => [a] -> Tensor Build a
                              +vector = vector' id
                              +
                              +vector' :: TensorType a => OpParams -> [a] -> Tensor Build a
                              +vector' params xs = constant' params [fromIntegral $ length xs] xs
                              +
                              +-- | Create a constant scalar.
                              +scalar :: TensorType a => a -> Tensor Build a
                              +scalar = scalar' id
                              +
                              +scalar' :: TensorType a => OpParams -> a -> Tensor Build a
                              +scalar' params x = constant' params [] [x]
                              +
                              +-- | Random tensor from the unit normal distribution with bounded values.
                              +--
                              +-- This is a type-restricted version of 'TensorFlow.GenOps.Core.truncatedNormal'.
                              +truncatedNormal :: (MonadBuild m, OneOf '[Word16, Double, Float] a)
                              +                => Tensor v Int64  -- ^ Shape.
                              +                -> m (Tensor Value a)
                              +truncatedNormal = CoreOps.truncatedNormal
                              +
                              +truncatedNormal' :: (MonadBuild m, OneOf '[Word16, Double, Float] a)
                              +                => OpParams -> Tensor v Int64  -- ^ Shape.
                              +                -> m (Tensor Value a)
                              +truncatedNormal' = CoreOps.truncatedNormal'
                              +
                              +zeros :: forall a . (Num a, TensorType a) => Shape -> Tensor Build a
                              +zeros (Shape s) = CoreOps.fill (vector $ map fromIntegral s) (scalar 0)
                              +
                              +shape :: TensorType t => Tensor v t -> Tensor Build Int32
                              +shape = CoreOps.shape
                              +
                              +shape' :: TensorType t => OpParams -> Tensor v t -> Tensor Build Int32
                              +shape' = CoreOps.shape'
                              +
                              +expandDims :: TensorType t => Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t
                              +expandDims = CoreOps.expandDims
                              +
                              +expandDims' :: TensorType t => OpParams -> Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t
                              +expandDims' = CoreOps.expandDims'
                              +
                              +-- | Helper function for reduction ops (translation of math_ops.reduced_shape).
                              +reducedShape :: (OneOf '[ Int32, Int64 ] t1, OneOf '[ Int32, Int64 ] t2) =>
                              +                Tensor v1 t1 -> Tensor v2 t2 -> Tensor Build Int32
                              +reducedShape inputShape axes =
                              +    let inputShape32 = toInt32 inputShape         -- [2, 3, 5, 7]
                              +        axes32 = toInt32 axes                     -- [1, 2]
                              +        toInt32 x = CoreOps.cast x :: Tensor Build Int32
                              +        inputRank = CoreOps.size inputShape32     -- 4
                              +        axesMod = (axes32 + inputRank) `CoreOps.mod` inputRank
                              +        axesShape = shape axesMod                 -- [2]
                              +    in CoreOps.dynamicStitch                      -- [2, 1, 1, 7]
                              +         [CoreOps.range 0 inputRank 1,            -- [0, 1, 2, 3]
                              +           axesMod]                               -- [1, 2]
                              +         [inputShape32,                           -- [2, 3, 5, 7]
                              +           CoreOps.fill axesShape 1]              -- [1, 1]
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Queue.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Queue.html new file mode 100644 index 0000000..540b151 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Queue.html @@ -0,0 +1,72 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +{-# LANGUAGE DataKinds #-}
                              +{-# LANGUAGE KindSignatures #-}
                              +{-# LANGUAGE OverloadedStrings #-}
                              +{-# LANGUAGE ScopedTypeVariables #-}
                              +
                              +-- | Queues in TensorFlow graph. Very limited support for now.
                              +module TensorFlow.Queue (Queue, makeQueue, enqueue, dequeue) where
                              +
                              +import Data.ByteString (ByteString)
                              +import Data.Int (Int64)
                              +import Data.Proxy (Proxy(..))
                              +import Lens.Family2 ((.~), (&))
                              +import TensorFlow.Build (ControlNode, MonadBuild, build, addInitializer, opAttr, opDef)
                              +import TensorFlow.BuildOp (buildOp)
                              +import TensorFlow.ControlFlow (group)
                              +import qualified TensorFlow.GenOps.Core as CoreOps
                              +import TensorFlow.Tensor (Ref, Value, Tensor, TensorList)
                              +import TensorFlow.Types (TensorTypes, fromTensorTypes)
                              +
                              +-- | A queue carrying tuples.
                              +data Queue (as :: [*]) = Queue { handle :: Handle }
                              +
                              +type Handle = Tensor Ref ByteString
                              +
                              +-- | Adds the given values to the queue.
                              +enqueue :: forall as v m . (MonadBuild m, TensorTypes as)
                              +           => Queue as
                              +           -> TensorList v as
                              +           -> m ControlNode
                              +enqueue = CoreOps.queueEnqueue . handle
                              +
                              +-- | Retrieves the values from the queue.
                              +dequeue :: forall as m . (MonadBuild m, TensorTypes as)
                              +           => Queue as
                              +           -> m (TensorList Value as)
                              +           -- ^ Dequeued tensors. They are coupled in a sense
                              +           -- that values appear together, even if they are
                              +           -- not consumed together.
                              +dequeue = CoreOps.queueDequeue . handle
                              +
                              +-- | Creates a new queue with the given capacity and shared name.
                              +makeQueue :: forall as m . (MonadBuild m, TensorTypes as)
                              +              => Int64  -- ^ The upper bound on the number of elements in
                              +                        --  this queue. Negative numbers mean no limit.
                              +              -> ByteString -- ^ If non-empty, this queue will be shared
                              +                            -- under the given name across multiple sessions.
                              +              -> m (Queue as)
                              +makeQueue capacity sharedName = do
                              +    q <- build $ buildOp [] (opDef "FIFOQueue"
                              +                     & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy as)
                              +                     & opAttr "shared_name" .~ sharedName
                              +                     & opAttr "capacity" .~ capacity
                              +                    )
                              +    group q >>= addInitializer
                              +    return (Queue q)
                              +
                              +-- TODO(gnezdo): Figure out the closing story for queues.
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Variable.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Variable.html new file mode 100644 index 0000000..9df08a8 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow.Variable.html @@ -0,0 +1,195 @@ +
                              -- | An implementation of ResourceHandle-based variables.
                              +--
                              +-- The main difference between this and 'Ref'-based variables is
                              +-- that reads are explicit, via the 'readValue' op.
                              +--
                              +-- TODO: given that distinction, figure out a good story around
                              +-- gradients and save/restore.  Then, merge this module into
                              +-- TensorFlow.Ops.
                              +{-# LANGUAGE DataKinds #-}
                              +{-# LANGUAGE FlexibleContexts #-}
                              +{-# LANGUAGE RecursiveDo #-}
                              +{-# LANGUAGE ScopedTypeVariables #-}
                              +{-# LANGUAGE OverloadedStrings #-}
                              +module TensorFlow.Variable
                              +    ( Variable
                              +    , variable
                              +    , variable'
                              +    , readValue
                              +    , initializedValue
                              +    , initializedVariable
                              +    , initializedVariable'
                              +    , zeroInitializedVariable
                              +    , zeroInitializedVariable'
                              +    , assign
                              +    , assign'
                              +    , assignAdd
                              +    , assignAdd'
                              +    , resourceApplyAdam
                              +    , resourceApplyAdam'
                              +    ) where
                              +
                              +import qualified Data.Complex
                              +import qualified Data.Int
                              +import qualified Data.Word
                              +import Data.Text.Encoding (encodeUtf8)
                              +import Lens.Family2 ((.~), (&))
                              +import TensorFlow.Core
                              +import TensorFlow.Build (opDef)
                              +import TensorFlow.BuildOp (buildInputs, pureOp, OpParams)
                              +import TensorFlow.Output (opInputs, unNodeName)
                              +import TensorFlow.Tensor (Rendered(..), ToTensor(..), renderValue, tensorNodeName)
                              +import TensorFlow.Types (tensorType)
                              +import qualified TensorFlow.GenOps.Core as CoreOps
                              +import TensorFlow.Ops (zeros)
                              +
                              +data Variable a = Variable
                              +    { variableHandle   :: Tensor Value ResourceHandle
                              +    , initializedValue :: Maybe (Tensor Value a)
                              +      -- ^ The initial value of a 'Variable' created with 'initializedVariable'.
                              +    }
                              +
                              +instance Rendered Variable where
                              +    renderedOutput = renderedOutput . variableHandle
                              +
                              +instance ToTensor Variable where
                              +    toTensor = readValue
                              +
                              +-- | Creates a new, uninitialized variable.
                              +variable :: (MonadBuild m, TensorType a) => Shape -> m (Variable a)
                              +variable = variable' id
                              +
                              +variable' :: forall m a . (MonadBuild m, TensorType a)
                              +                    => OpParams -> Shape -> m (Variable a)
                              +variable' params s = build $ do
                              +    -- Each variable needs a unique "shared_name".  Use MonadFix to
                              +    -- set the attribute to the same name as the variable itself, without
                              +    -- exposing more internals of the Build module.
                              +    rec t <- CoreOps.varHandleOp' (params . (opAttr "shared_name" .~ n))
                              +                                    (tensorType (undefined :: a)) s
                              +        let n = encodeUtf8 $ unNodeName $ tensorNodeName t
                              +    return $ Variable t Nothing
                              +
                              +-- | Creates a variable initialized to the given value.
                              +-- Initialization happens next time session runs.
                              +initializedVariable :: (MonadBuild m, TensorType a)
                              +                    => Tensor v a -> m (Variable a)
                              +initializedVariable = initializedVariable' id
                              +
                              +initializedVariable' :: forall a m v . (MonadBuild m, TensorType a)
                              +                    => OpParams -> Tensor v a -> m (Variable a)
                              +initializedVariable' params initializer = do
                              +    -- The shape is not known initially.
                              +    (Variable h Nothing :: Variable a) <- variable' params (Shape [])
                              +    initializer' <- renderValue initializer
                              +    i <- CoreOps.assignVariableOp h initializer'
                              +    addInitializer =<< group i
                              +    return (Variable h (Just initializer'))
                              +
                              +-- | Creates a zero-initialized variable with the given shape.
                              +zeroInitializedVariable
                              +  :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Variable a)
                              +zeroInitializedVariable = zeroInitializedVariable' id
                              +
                              +zeroInitializedVariable'
                              +  :: (MonadBuild m, TensorType a, Num a) => OpParams -> Shape -> m (Variable a)
                              +zeroInitializedVariable' params = initializedVariable' params . zeros
                              +
                              +-- | Gets the value stored in a variable.
                              +--
                              +-- Note that this op is stateful since it depends on the value of the variable;
                              +-- however, it may be CSE'd with other reads in the same context.  The context can
                              +-- be fixed by using 'render' along with (for example) 'withControlDependencies'.
                              +-- For example:
                              +--
                              +-- >   runSession $ do
                              +-- >     v <- variable []
                              +-- >     a <- assign v 24
                              +-- >     r <- withControlDependencies a $ render $ readValue v + 18
                              +-- >     result <- run r
                              +-- >     liftIO $ (42 :: Float) @=? unScalar result
                              +--
                              +--
                              +readValue :: TensorType a => Variable a -> Tensor Build a
                              +readValue = readValue' id
                              +
                              +readValue' :: forall a . TensorType a
                              +    => OpParams -> Variable a -> Tensor Build a
                              +readValue' params (Variable h _)
                              +    = pureOp [] $ do
                              +        os <- buildInputs h
                              +        pure $ opDef "ReadVariableOp"
                              +                & (params
                              +                    . (opAttr "dtype" .~ tensorType (undefined :: a))
                              +                    . (opInputs .~ os))
                              +
                              +-- | Sets the value of a variable.
                              +assign :: (MonadBuild m, TensorType a)
                              +    => Variable a -> Tensor v a -> m ControlNode
                              +assign = assign' id
                              +
                              +assign' :: (MonadBuild m, TensorType a)
                              +    => OpParams -> Variable a -> Tensor v a -> m ControlNode
                              +assign' params (Variable h _) v = CoreOps.assignVariableOp' params h v
                              +
                              +-- | Increments the value of a variable.
                              +assignAdd :: (MonadBuild m, TensorType a)
                              +    => Variable a -> Tensor v a -> m ControlNode
                              +assignAdd = assignAdd' id
                              +
                              +assignAdd' :: (MonadBuild m, TensorType a)
                              +    => OpParams -> Variable a -> Tensor v a -> m ControlNode
                              +assignAdd' params (Variable h _) v = CoreOps.assignAddVariableOp' params h v
                              +
                              +-- | Update '*var' according to the Adam algorithm.
                              +--
                              +-- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
                              +-- m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
                              +-- v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
                              +-- variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
                              +resourceApplyAdam ::
                              +    (MonadBuild m,
                              +     OneOf '[(Data.Complex.Complex Double),
                              +             (Data.Complex.Complex Float),
                              +             Data.Int.Int16,
                              +             Data.Int.Int32,
                              +             Data.Int.Int64, Data.Int.Int8,
                              +             Data.Word.Word16,
                              +             Data.Word.Word8, Double,
                              +             Float] t)
                              +    => Variable t -- ^ __var__: Should be from a Variable().
                              +    -> Variable t -- ^ __m__: Should be from a Variable().
                              +    -> Variable t -- ^ __v__: Should be from a Variable().
                              +    -> Tensor v1 t -- ^ __beta1_power__: Must be a scalar.
                              +    -> Tensor v2 t -- ^ __beta2_power__: Must be a scalar.
                              +    -> Tensor v3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                              +    -> Tensor v4 t -- ^ __beta1__: Momentum factor. Must be a scalar.
                              +    -> Tensor v5 t -- ^ __beta2__: Momentum factor. Must be a scalar.
                              +    -> Tensor v6 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                              +    -> Tensor v7 t -- ^ __grad__: The gradient.
                              +    -> m (ControlNode)
                              +resourceApplyAdam = resourceApplyAdam' id
                              +
                              +resourceApplyAdam' ::
                              +    (MonadBuild m,
                              +     OneOf '[(Data.Complex.Complex Double),
                              +             (Data.Complex.Complex Float),
                              +             Data.Int.Int16, Data.Int.Int32,
                              +             Data.Int.Int64, Data.Int.Int8,
                              +             Data.Word.Word16, Data.Word.Word8, Double,
                              +             Float] t)
                              +    => OpParams
                              +    -> Variable t -- ^ __var__: Should be from a Variable().
                              +    -> Variable t -- ^ __m__: Should be from a Variable().
                              +    -> Variable t -- ^ __v__: Should be from a Variable().
                              +    -> Tensor v1 t -- ^ __beta1_power__: Must be a scalar.
                              +    -> Tensor v2 t -- ^ __beta2_power__: Must be a scalar.
                              +    -> Tensor v3 t -- ^ __lr__: Scaling factor. Must be a scalar.
                              +    -> Tensor v4 t -- ^ __beta1__: Momentum factor. Must be a scalar.
                              +    -> Tensor v5 t -- ^ __beta2__: Momentum factor. Must be a scalar.
                              +    -> Tensor v6 t -- ^ __epsilon__: Ridge term. Must be a scalar.
                              +    -> Tensor v7 t -- ^ __grad__: The gradient.
                              +    -> m (ControlNode)
                              +resourceApplyAdam' params (Variable var _) (Variable m _) (Variable v _) =
                              +    CoreOps.resourceApplyAdam' params var m v
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-ops-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/style.css b/docs/haddock/tensorflow-ops-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt b/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt deleted file mode 100644 index ecf2a9a..0000000 --- a/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt +++ 
/dev/null @@ -1,491 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | Friendly layer around TensorFlow bindings. --- --- Please see README.md -@package tensorflow-ops -@version 0.1.0.0 - - --- | This module contains definitions for some built-in TensorFlow --- operations. --- --- Note that certain, "stateful" ops like variable and --- assign return a Build action (e.g., Build (Tensor --- Ref a) instead of a pure value; the returned Tensors are --- always rendered in the current Build context. This approach --- helps us avoid problems with inlining or common subexpression --- elimination, by writing --- ---
                              ---   do
                              ---       v <- variable []
                              ---       w <- assign v 3
                              ---       render $ w * w
                              ---   
                              --- --- instead of --- ---
                              ---   let
                              ---      v = variable []
                              ---      w = assign v 3
                              ---   in w * w
                              ---   
                              --- --- since the latter could be reasonably transformed by the compiler into --- (or vice versa) --- ---
                              ---   let
                              ---      v = variable []
                              ---      w = assign v 3
                              ---      w' = assign v 3
                              ---   in w * w'
                              ---   
                              --- --- Ops should return a Build action if their original --- OpDef marks them as stateful, or if they take any Refs as --- input. (This mirrors the rules that TensorFlow uses to avoid common --- subexpression elimination.) -module TensorFlow.Ops - --- | Returns x + y element-wise. --- ---
                                ---
                              • NOTE*: Add supports broadcasting. AddN does not. --- More about broadcasting here
                              • ---
                              -add :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -add' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the absolute value of a tensor. --- --- Given a tensor x, this operation returns a tensor containing --- the absolute value of each element in x. For example, if x is --- an input element and y is an output element, this operation computes --- \(y = |x|\). -abs :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t => Tensor v'1 t -> Tensor Build t -abs' :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Add all input tensors element wise. -addN :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => [Tensor v'1 t] -> Tensor Build t -addN' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => OpParams -> [Tensor v'1 t] -> Tensor Build t - --- | Returns the index with the largest value across dimensions of a --- tensor. 
-argMax :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 -argMax' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 - --- | Update ref by assigning value to it. --- --- This operation outputs "ref" after the assignment is done. This makes --- it easier to chain operations that need to use the reset value. -assign :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) -assign' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) - --- | Return the reduction indices for computing gradients of s0 op s1 with --- broadcast. --- --- This is typically used by gradient computations for a broadcasting --- operation. -broadcastGradientArgs :: OneOf ((:) * Int32 ((:) * Int64 ([] *))) t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) -broadcastGradientArgs' :: OneOf ((:) * Int32 ((:) * Int64 ([] *))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) - --- | Cast x of type SrcT to y of DstT. -cast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT -cast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT - --- | Concatenates tensors along one dimension. -concat :: TensorType t => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t -concat' :: TensorType t => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t - --- | Create a constant tensor. 
--- --- The values should be in row major order, e.g., --- --- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ... -constant :: TensorType a => Shape -> [a] -> Tensor Build a -constant' :: TensorType a => OpParams -> Shape -> [a] -> Tensor Build a - --- | Returns the truth value of (x == y) element-wise. --- ---
                                ---
                              • NOTE*: Equal supports broadcasting. More about --- broadcasting here
                              • ---
                              -equal :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -equal' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -expandDims :: TensorType t => Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t -expandDims' :: TensorType t => OpParams -> Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t - --- | Creates a variable initialized to the given value. Initialization --- happens next time session runs. -initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Tensor Ref a) -initializedVariable' :: (MonadBuild m, TensorType a) => OpParams -> Tensor v a -> m (Tensor Ref a) - --- | Creates a zero-initialized variable with the given shape. -zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Tensor Ref a) -zeroInitializedVariable' :: (MonadBuild m, TensorType a, Num a) => OpParams -> Shape -> m (Tensor Ref a) - --- | Creates a tensor filled with a scalar value. --- --- This operation creates a tensor of shape dims and fills it --- with value. --- --- For example: --- --- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) --- ==> [[9, 9, 9] [9, 9, 9]] ``` -fill :: TensorType t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t -fill' :: TensorType t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t - --- | Return a tensor with the same shape and contents as the input tensor --- or value. 
-identity :: TensorType t => Tensor v'1 t -> Tensor Build t -identity' :: TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Multiply the matrix "a" by the matrix "b". --- --- The inputs must be two-dimensional matrices and the inner dimension of --- "a" (after being transposed if transpose_a is true) must match the --- outer dimension of "b" (after being transposed if transposed_b is --- true). --- ---
                                ---
                              • Note*: The default kernel implementation for MatMul on GPUs uses --- cublas.
                              • ---
                              -matMul :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -matMul' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -matTranspose :: TensorType a => Tensor e a -> Tensor Build a -matTranspose' :: TensorType a => OpParams -> Tensor v a -> Tensor Build a - --- | Computes the mean of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -mean :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -mean' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Returns x * y element-wise. --- ---
                                ---
                              • NOTE*: Mul supports broadcasting. More about broadcasting --- here
                              • ---
                              -mul :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -mul' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes numerical negative value element-wise. --- --- I.e., \(y = -x\). -neg :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor Build t -neg' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns a one-hot tensor. --- --- The locations represented by indices in indices take value --- on_value, while all other locations take value --- off_value. --- --- If the input indices is rank N, the output will have --- rank `N+1`, The new axis is created at dimension axis --- (default: the new axis is appended at the end). --- --- If indices is a scalar the output shape will be a vector of --- length depth. 
--- --- If indices is a vector of length features, the --- output shape will be: ``` features x depth if axis == -1 depth x --- features if axis == 0 ``` --- --- If indices is a matrix (batch) with shape `[batch, --- features]`, the output shape will be: ``` batch x features x depth if --- axis == -1 batch x depth x features if axis == 1 depth x batch x --- features if axis == 0 ``` --- --- Examples ========= --- --- Suppose that --- --- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 --- axis = -1 ``` --- --- Then output is `[4 x 3]`: --- --- ```output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) --- [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) ``` --- --- Suppose that --- --- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 0.0 off_value = 3.0 --- axis = 0 ``` --- --- Then output is `[3 x 4]`: --- --- ```output = [0.0 3.0 3.0 3.0] [3.0 3.0 3.0 0.0] [3.0 3.0 3.0 3.0] [3.0 --- 0.0 3.0 3.0] // ^ one_hot(0) // ^ one_hot(2) // ^ one_hot(-1) // ^ --- one_hot(1) ``` Suppose that --- --- ``` indices = [[0, 2], [1, -1]] depth = 3 on_value = 1.0 off_value = --- 0.0 axis = -1 ``` --- --- Then output is `[2 x 2 x 3]`: --- --- ```output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // --- one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // --- one_hot(-1) ]``` -oneHot :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t -oneHot' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t - --- | Packs a list of N rank-R tensors into one --- rank-`(R+1)` tensor. --- --- Packs the N tensors in values into a tensor with --- rank one higher than each tensor in values, by packing them --- along the axis dimension. 
Given a list of tensors of shape --- `(A, B, C)`; --- --- if `axis == 0` then the output tensor will have the shape --- `(N, A, B, C)`. if `axis == 1` then the output tensor will --- have the shape `(A, N, B, C)`. Etc. --- --- For example: --- --- ```prettyprint # x is [1, 4] # y is [2, 5] # --- z is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # --- Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, --- 6]] ``` --- --- This is the opposite of unpack. -pack :: TensorType t => [Tensor v'1 t] -> Tensor Build t -pack' :: TensorType t => OpParams -> [Tensor v'1 t] -> Tensor Build t -placeholder :: (MonadBuild m, TensorType a) => Shape -> m (Tensor Value a) -placeholder' :: (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Tensor Value a) - --- | Creates a sequence of numbers. --- --- This operation creates a sequence of numbers that begins at --- start and extends by increments of delta up to but --- not including limit. --- --- For example: --- --- ``` # start is 3 # limit is 18 # delta is 3 --- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ``` -range :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx -range' :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx - --- | Helper function for reduction ops (translation of --- math_ops.reduced_shape). -reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Build Int32 - --- | Computes rectified linear: `max(features, 0)`. 
-relu :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => Tensor v'1 t -> Tensor Build t -relu' :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes rectified linear gradients for a Relu operation. -reluGrad :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -reluGrad' :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Reshapes a tensor. --- --- Given tensor, this operation returns a tensor that has the --- same values as tensor with shape shape. --- --- If one component of shape is the special value -1, the size of --- that dimension is computed so that the total size remains constant. In --- particular, a shape of `[-1]` flattens into 1-D. At most one --- component of shape can be -1. --- --- If shape is 1-D or higher, then the operation returns a tensor --- with shape shape filled with the values of tensor. In --- this case, the number of elements implied by shape must be the --- same as the number of elements in tensor. 
--- --- For example: --- --- ```prettyprint # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # --- tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], --- [4, 5, 6], [7, 8, 9]] --- --- # tensor t is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor --- t has shape [2, 2, 2] reshape(t, [2, 4]) ==> [[1, 1, 2, --- 2], [3, 3, 4, 4]] --- --- # tensor t is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4, --- 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor t has shape [3, --- 2, 3] # pass '[-1]' to flatten t reshape(t, [-1]) ==> [1, --- 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] --- --- # -1 can also be used to infer the shape --- --- # -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, --- 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2: --- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, --- 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [ 2, -1, 3]) --- ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, --- 6, 6]]] --- --- # tensor t is [7] # shape `[]` reshapes to a scalar --- reshape(t, []) ==> 7 ``` -reshape :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t -reshape' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t - --- | Restore a tensor's value from a checkpoint file. -restore :: (MonadBuild m, TensorType a) => ByteString -> Tensor Ref a -> m ControlNode - --- | Restore a tensor's value from a checkpoint file. --- --- This version allows restoring from a checkpoint file that uses a --- different tensor name than the variable. -restoreFromName :: (MonadBuild m, TensorType a) => ByteString -> ByteString -> Tensor Ref a -> m ControlNode -save :: (Rendered v, MonadBuild m, TensorType a) => ByteString -> [Tensor v a] -> m ControlNode - --- | Create a constant scalar. 
-scalar :: TensorType a => a -> Tensor Build a -scalar' :: TensorType a => OpParams -> a -> Tensor Build a -shape :: TensorType t => Tensor v t -> Tensor Build Int32 -shape' :: TensorType t => OpParams -> Tensor v t -> Tensor Build Int32 - --- | Returns an element-wise indication of the sign of a number. --- --- `y = sign(x) = -1` if `x 0 if `x == 0`; 1 if `x 0`. --- --- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y --- = 0`. -sign :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor Build t -sign' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Returns the size of a tensor. --- --- This operation returns an integer representing the number of elements --- in input. --- --- For example: --- --- ```prettyprint # t is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], --- [4, 4, 4]]]] size(t) ==> 12 ``` -size :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) => Tensor v'1 t -> Tensor Build out_type -size' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type - --- | Computes softmax activations. --- --- For each batch i and class j we have --- --- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) -softmax :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => Tensor v'1 t -> Tensor Build t -softmax' :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Computes softmax cross entropy cost and gradients to backpropagate. --- --- Inputs are the logits, not probabilities. 
-softmaxCrossEntropyWithLogits :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) -softmaxCrossEntropyWithLogits' :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) - --- | Converts a sparse representation into a dense tensor. --- --- Builds an array dense with shape output_shape such --- that --- --- ```prettyprint # If sparse_indices is scalar dense[i] = (i == --- sparse_indices ? sparse_values : default_value) --- --- # If sparse_indices is a vector, then for each i --- dense[sparse_indices[i]] = sparse_values[i] --- --- # If sparse_indices is an n by d matrix, then for each i in [0, n) --- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = --- sparse_values[i] ``` --- --- All other values in dense are set to default_value. --- If sparse_values is a scalar, all sparse indices are set to --- this single value. --- --- Indices should be sorted in lexicographic order, and indices must not --- contain any repeats. If validate_indices is true, these --- properties are checked during execution. -sparseToDense :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t -sparseToDense' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t - --- | Returns x - y element-wise. --- ---
                                ---
                              • NOTE*: Sub supports broadcasting. More about broadcasting --- here
                              • ---
                              -sub :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -sub' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t - --- | Computes the sum of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -sum :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -sum' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t - --- | Shuffle dimensions of x according to a permutation. --- --- The output y has the same rank as x. The shapes of --- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for --- i in [0, 1, ..., rank(x) - 1]` -transpose :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t -transpose' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t - --- | Random tensor from the unit normal distribution with bounded values. --- --- This is a type-restricted version of truncatedNormal. 
-truncatedNormal :: (MonadBuild m, OneOf '[Word16, Double, Float] a) => Tensor v Int64 -> m (Tensor Value a) -truncatedNormal' :: (MonadBuild m, OneOf '[Word16, Double, Float] a) => OpParams -> Tensor v Int64 -> m (Tensor Value a) - --- | Use VariableV2 instead. -variable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) -variable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) - --- | Create a constant vector. -vector :: TensorType a => [a] -> Tensor Build a -vector' :: TensorType a => OpParams -> [a] -> Tensor Build a -zeros :: (Num a, TensorType a) => Shape -> Tensor Build a - --- | Returns a tensor of zeros with the same shape and type as x. -zerosLike :: TensorType t => Tensor v'1 t -> Tensor Build t -zerosLike' :: TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t - --- | Reshape a N-D tensor down to a scalar. --- --- See reshape. -scalarize :: TensorType a => Tensor v a -> Tensor Build a -instance (TensorFlow.Types.TensorType a, GHC.Num.Num a, v ~ TensorFlow.Build.Build, TensorFlow.Types.OneOf '[GHC.Types.Double, GHC.Types.Float, GHC.Int.Int32, GHC.Int.Int64, Data.Complex.Complex GHC.Types.Float, Data.Complex.Complex GHC.Types.Double] a) => GHC.Num.Num (TensorFlow.Tensor.Tensor v a) - - --- | Parallel lookups on the list of tensors. -module TensorFlow.EmbeddingOps - --- | Looks up ids in a list of embedding tensors. --- --- This function is used to perform parallel lookups on the list of --- tensors in params. It is a generalization of gather, --- where params is interpreted as a partition of a larger --- embedding tensor. --- --- The partition_strategy is "mod", we assign each id to partition `p = --- id % len(params)`. For instance, 13 ids are split across 5 partitions --- as: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]` --- --- The results of the lookup are concatenated into a dense tensor. The --- returned tensor has shape `shape(ids) + shape(params)[1:]`. 
-embeddingLookup :: (MonadBuild m, Rendered v1, TensorType a, OneOf '[Int64, Int32] b, Num b) => [Tensor v1 a] -> Tensor v2 b -> m (Tensor Value a) - -module TensorFlow.Gradient - --- | Gradient of y w.r.t. each element of xs. -gradients :: (MonadBuild m, Rendered v2, GradientCompatible a) => Tensor v1 a -> [Tensor v2 a] -> m [Tensor Value a] diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/LICENSE b/docs/haddock/tensorflow-proto-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-Example.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-Example.html new file mode 100644 index 0000000..2793de1 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-Example.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Example.Example

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Example.Example

                              Documentation

                              data Example Source #

                              Constructors

                              Example 

                              Instances

                              Eq Example Source # 

                              Methods

                              (==) :: Example -> Example -> Bool #

                              (/=) :: Example -> Example -> Bool #

                              Ord Example Source # 
                              Show Example Source # 
                              Message Example Source # 

                              Methods

                              descriptor :: MessageDescriptor Example

                              Default Example Source # 

                              Methods

                              def :: Example

                              ((~) * a Features, (~) * b Features, Functor f) => HasLens "features" f Example Example a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "features" -> (a -> f b) -> Example -> f Example

                              ((~) * a (Maybe Features), (~) * b (Maybe Features), Functor f) => HasLens "maybe'features" f Example Example a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'features" -> (a -> f b) -> Example -> f Example

                              data SequenceExample Source #

                              Instances

                              Eq SequenceExample Source # 
                              Ord SequenceExample Source # 
                              Show SequenceExample Source # 
                              Message SequenceExample Source # 

                              Methods

                              descriptor :: MessageDescriptor SequenceExample

                              Default SequenceExample Source # 
                              ((~) * a Features, (~) * b Features, Functor f) => HasLens "context" f SequenceExample SequenceExample a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "context" -> (a -> f b) -> SequenceExample -> f SequenceExample

                              ((~) * a FeatureLists, (~) * b FeatureLists, Functor f) => HasLens "featureLists" f SequenceExample SequenceExample a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "featureLists" -> (a -> f b) -> SequenceExample -> f SequenceExample

                              ((~) * a (Maybe Features), (~) * b (Maybe Features), Functor f) => HasLens "maybe'context" f SequenceExample SequenceExample a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'context" -> (a -> f b) -> SequenceExample -> f SequenceExample

                              ((~) * a (Maybe FeatureLists), (~) * b (Maybe FeatureLists), Functor f) => HasLens "maybe'featureLists" f SequenceExample SequenceExample a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'featureLists" -> (a -> f b) -> SequenceExample -> f SequenceExample

                              context :: forall f s t a b. HasLens "context" f s t a b => LensLike f s t a b Source #

                              featureLists :: forall f s t a b. HasLens "featureLists" f s t a b => LensLike f s t a b Source #

                              features :: forall f s t a b. HasLens "features" f s t a b => LensLike f s t a b Source #

                              maybe'context :: forall f s t a b. HasLens "maybe'context" f s t a b => LensLike f s t a b Source #

                              maybe'featureLists :: forall f s t a b. HasLens "maybe'featureLists" f s t a b => LensLike f s t a b Source #

                              maybe'features :: forall f s t a b. HasLens "maybe'features" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-ExampleParserConfiguration.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-ExampleParserConfiguration.html new file mode 100644 index 0000000..62c137a --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-ExampleParserConfiguration.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Example.ExampleParserConfiguration

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Example.ExampleParserConfiguration

                              Documentation

                              data ExampleParserConfiguration Source #

                              Instances

                              Eq ExampleParserConfiguration Source # 
                              Ord ExampleParserConfiguration Source # 
                              Show ExampleParserConfiguration Source # 
                              Message ExampleParserConfiguration Source # 

                              Methods

                              descriptor :: MessageDescriptor ExampleParserConfiguration

                              Default ExampleParserConfiguration Source # 
                              ((~) * a (Map Text FeatureConfiguration), (~) * b (Map Text FeatureConfiguration), Functor f) => HasLens "featureMap" f ExampleParserConfiguration ExampleParserConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "featureMap" -> (a -> f b) -> ExampleParserConfiguration -> f ExampleParserConfiguration

                              data ExampleParserConfiguration'FeatureMapEntry Source #

                              Instances

                              Eq ExampleParserConfiguration'FeatureMapEntry Source # 
                              Ord ExampleParserConfiguration'FeatureMapEntry Source # 
                              Show ExampleParserConfiguration'FeatureMapEntry Source # 
                              Message ExampleParserConfiguration'FeatureMapEntry Source # 
                              Default ExampleParserConfiguration'FeatureMapEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f ExampleParserConfiguration'FeatureMapEntry ExampleParserConfiguration'FeatureMapEntry a b Source # 
                              ((~) * a (Maybe FeatureConfiguration), (~) * b (Maybe FeatureConfiguration), Functor f) => HasLens "maybe'value" f ExampleParserConfiguration'FeatureMapEntry ExampleParserConfiguration'FeatureMapEntry a b Source # 
                              ((~) * a FeatureConfiguration, (~) * b FeatureConfiguration, Functor f) => HasLens "value" f ExampleParserConfiguration'FeatureMapEntry ExampleParserConfiguration'FeatureMapEntry a b Source # 

                              data FeatureConfiguration Source #

                              Instances

                              Eq FeatureConfiguration Source # 
                              Ord FeatureConfiguration Source # 
                              Show FeatureConfiguration Source # 
                              Message FeatureConfiguration Source # 

                              Methods

                              descriptor :: MessageDescriptor FeatureConfiguration

                              Default FeatureConfiguration Source # 
                              ((~) * a FixedLenFeatureProto, (~) * b FixedLenFeatureProto, Functor f) => HasLens "fixedLenFeature" f FeatureConfiguration FeatureConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "fixedLenFeature" -> (a -> f b) -> FeatureConfiguration -> f FeatureConfiguration

                              ((~) * a (Maybe FeatureConfiguration'Config), (~) * b (Maybe FeatureConfiguration'Config), Functor f) => HasLens "maybe'config" f FeatureConfiguration FeatureConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'config" -> (a -> f b) -> FeatureConfiguration -> f FeatureConfiguration

                              ((~) * a (Maybe FixedLenFeatureProto), (~) * b (Maybe FixedLenFeatureProto), Functor f) => HasLens "maybe'fixedLenFeature" f FeatureConfiguration FeatureConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'fixedLenFeature" -> (a -> f b) -> FeatureConfiguration -> f FeatureConfiguration

                              ((~) * a (Maybe VarLenFeatureProto), (~) * b (Maybe VarLenFeatureProto), Functor f) => HasLens "maybe'varLenFeature" f FeatureConfiguration FeatureConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'varLenFeature" -> (a -> f b) -> FeatureConfiguration -> f FeatureConfiguration

                              ((~) * a VarLenFeatureProto, (~) * b VarLenFeatureProto, Functor f) => HasLens "varLenFeature" f FeatureConfiguration FeatureConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "varLenFeature" -> (a -> f b) -> FeatureConfiguration -> f FeatureConfiguration

                              data FixedLenFeatureProto Source #

                              Instances

                              Eq FixedLenFeatureProto Source # 
                              Ord FixedLenFeatureProto Source # 
                              Show FixedLenFeatureProto Source # 
                              Message FixedLenFeatureProto Source # 

                              Methods

                              descriptor :: MessageDescriptor FixedLenFeatureProto

                              Default FixedLenFeatureProto Source # 
                              ((~) * a TensorProto, (~) * b TensorProto, Functor f) => HasLens "defaultValue" f FixedLenFeatureProto FixedLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "defaultValue" -> (a -> f b) -> FixedLenFeatureProto -> f FixedLenFeatureProto

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "dtype" f FixedLenFeatureProto FixedLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "dtype" -> (a -> f b) -> FixedLenFeatureProto -> f FixedLenFeatureProto

                              ((~) * a (Maybe TensorProto), (~) * b (Maybe TensorProto), Functor f) => HasLens "maybe'defaultValue" f FixedLenFeatureProto FixedLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'defaultValue" -> (a -> f b) -> FixedLenFeatureProto -> f FixedLenFeatureProto

                              ((~) * a (Maybe TensorShapeProto), (~) * b (Maybe TensorShapeProto), Functor f) => HasLens "maybe'shape" f FixedLenFeatureProto FixedLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'shape" -> (a -> f b) -> FixedLenFeatureProto -> f FixedLenFeatureProto

                              ((~) * a TensorShapeProto, (~) * b TensorShapeProto, Functor f) => HasLens "shape" f FixedLenFeatureProto FixedLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "shape" -> (a -> f b) -> FixedLenFeatureProto -> f FixedLenFeatureProto

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "valuesOutputTensorName" f FixedLenFeatureProto FixedLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "valuesOutputTensorName" -> (a -> f b) -> FixedLenFeatureProto -> f FixedLenFeatureProto

                              data VarLenFeatureProto Source #

                              Instances

                              Eq VarLenFeatureProto Source # 
                              Ord VarLenFeatureProto Source # 
                              Show VarLenFeatureProto Source # 
                              Message VarLenFeatureProto Source # 

                              Methods

                              descriptor :: MessageDescriptor VarLenFeatureProto

                              Default VarLenFeatureProto Source # 
                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "dtype" f VarLenFeatureProto VarLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "dtype" -> (a -> f b) -> VarLenFeatureProto -> f VarLenFeatureProto

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "indicesOutputTensorName" f VarLenFeatureProto VarLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "indicesOutputTensorName" -> (a -> f b) -> VarLenFeatureProto -> f VarLenFeatureProto

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "shapesOutputTensorName" f VarLenFeatureProto VarLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "shapesOutputTensorName" -> (a -> f b) -> VarLenFeatureProto -> f VarLenFeatureProto

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "valuesOutputTensorName" f VarLenFeatureProto VarLenFeatureProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "valuesOutputTensorName" -> (a -> f b) -> VarLenFeatureProto -> f VarLenFeatureProto

                              defaultValue :: forall f s t a b. HasLens "defaultValue" f s t a b => LensLike f s t a b Source #

                              dtype :: forall f s t a b. HasLens "dtype" f s t a b => LensLike f s t a b Source #

                              featureMap :: forall f s t a b. HasLens "featureMap" f s t a b => LensLike f s t a b Source #

                              fixedLenFeature :: forall f s t a b. HasLens "fixedLenFeature" f s t a b => LensLike f s t a b Source #

                              indicesOutputTensorName :: forall f s t a b. HasLens "indicesOutputTensorName" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              maybe'config :: forall f s t a b. HasLens "maybe'config" f s t a b => LensLike f s t a b Source #

                              maybe'defaultValue :: forall f s t a b. HasLens "maybe'defaultValue" f s t a b => LensLike f s t a b Source #

                              maybe'fixedLenFeature :: forall f s t a b. HasLens "maybe'fixedLenFeature" f s t a b => LensLike f s t a b Source #

                              maybe'shape :: forall f s t a b. HasLens "maybe'shape" f s t a b => LensLike f s t a b Source #

                              maybe'value :: forall f s t a b. HasLens "maybe'value" f s t a b => LensLike f s t a b Source #

                              maybe'varLenFeature :: forall f s t a b. HasLens "maybe'varLenFeature" f s t a b => LensLike f s t a b Source #

                              shape :: forall f s t a b. HasLens "shape" f s t a b => LensLike f s t a b Source #

                              shapesOutputTensorName :: forall f s t a b. HasLens "shapesOutputTensorName" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              valuesOutputTensorName :: forall f s t a b. HasLens "valuesOutputTensorName" f s t a b => LensLike f s t a b Source #

                              varLenFeature :: forall f s t a b. HasLens "varLenFeature" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-Feature.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-Feature.html new file mode 100644 index 0000000..586d7d2 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Example-Feature.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Example.Feature

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Example.Feature

                              Documentation

                              data BytesList Source #

                              Constructors

                              BytesList 

                              Instances

                              data Feature Source #

                              Constructors

                              Feature 

                              Instances

                              Eq Feature Source # 

                              Methods

                              (==) :: Feature -> Feature -> Bool #

                              (/=) :: Feature -> Feature -> Bool #

                              Ord Feature Source # 
                              Show Feature Source # 
                              Message Feature Source # 

                              Methods

                              descriptor :: MessageDescriptor Feature

                              Default Feature Source # 

                              Methods

                              def :: Feature

                              ((~) * a BytesList, (~) * b BytesList, Functor f) => HasLens "bytesList" f Feature Feature a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "bytesList" -> (a -> f b) -> Feature -> f Feature

                              ((~) * a FloatList, (~) * b FloatList, Functor f) => HasLens "floatList" f Feature Feature a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "floatList" -> (a -> f b) -> Feature -> f Feature

                              ((~) * a Int64List, (~) * b Int64List, Functor f) => HasLens "int64List" f Feature Feature a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "int64List" -> (a -> f b) -> Feature -> f Feature

                              ((~) * a (Maybe BytesList), (~) * b (Maybe BytesList), Functor f) => HasLens "maybe'bytesList" f Feature Feature a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'bytesList" -> (a -> f b) -> Feature -> f Feature

                              ((~) * a (Maybe FloatList), (~) * b (Maybe FloatList), Functor f) => HasLens "maybe'floatList" f Feature Feature a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'floatList" -> (a -> f b) -> Feature -> f Feature

                              ((~) * a (Maybe Int64List), (~) * b (Maybe Int64List), Functor f) => HasLens "maybe'int64List" f Feature Feature a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'int64List" -> (a -> f b) -> Feature -> f Feature

                              ((~) * a (Maybe Feature'Kind), (~) * b (Maybe Feature'Kind), Functor f) => HasLens "maybe'kind" f Feature Feature a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'kind" -> (a -> f b) -> Feature -> f Feature

                              data FeatureLists'FeatureListEntry Source #

                              Instances

                              Eq FeatureLists'FeatureListEntry Source # 
                              Ord FeatureLists'FeatureListEntry Source # 
                              Show FeatureLists'FeatureListEntry Source # 
                              Message FeatureLists'FeatureListEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor FeatureLists'FeatureListEntry

                              Default FeatureLists'FeatureListEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f FeatureLists'FeatureListEntry FeatureLists'FeatureListEntry a b Source # 
                              ((~) * a (Maybe FeatureList), (~) * b (Maybe FeatureList), Functor f) => HasLens "maybe'value" f FeatureLists'FeatureListEntry FeatureLists'FeatureListEntry a b Source # 
                              ((~) * a FeatureList, (~) * b FeatureList, Functor f) => HasLens "value" f FeatureLists'FeatureListEntry FeatureLists'FeatureListEntry a b Source # 

                              data Features Source #

                              Constructors

                              Features 

                              Fields

                              Instances

                              Eq Features Source # 
                              Ord Features Source # 
                              Show Features Source # 
                              Message Features Source # 

                              Methods

                              descriptor :: MessageDescriptor Features

                              Default Features Source # 

                              Methods

                              def :: Features

                              ((~) * a (Map Text Feature), (~) * b (Map Text Feature), Functor f) => HasLens "feature" f Features Features a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "feature" -> (a -> f b) -> Features -> f Features

                              data Features'FeatureEntry Source #

                              Instances

                              Eq Features'FeatureEntry Source # 
                              Ord Features'FeatureEntry Source # 
                              Show Features'FeatureEntry Source # 
                              Message Features'FeatureEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor Features'FeatureEntry

                              Default Features'FeatureEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f Features'FeatureEntry Features'FeatureEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "key" -> (a -> f b) -> Features'FeatureEntry -> f Features'FeatureEntry

                              ((~) * a (Maybe Feature), (~) * b (Maybe Feature), Functor f) => HasLens "maybe'value" f Features'FeatureEntry Features'FeatureEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> Features'FeatureEntry -> f Features'FeatureEntry

                              ((~) * a Feature, (~) * b Feature, Functor f) => HasLens "value" f Features'FeatureEntry Features'FeatureEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> Features'FeatureEntry -> f Features'FeatureEntry

                              data FloatList Source #

                              Constructors

                              FloatList 

                              Fields

                              Instances

                              Eq FloatList Source # 
                              Ord FloatList Source # 
                              Show FloatList Source # 
                              Message FloatList Source # 

                              Methods

                              descriptor :: MessageDescriptor FloatList

                              Default FloatList Source # 

                              Methods

                              def :: FloatList

                              ((~) * a [Float], (~) * b [Float], Functor f) => HasLens "value" f FloatList FloatList a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> FloatList -> f FloatList

                              data Int64List Source #

                              Constructors

                              Int64List 

                              Fields

                              Instances

                              Eq Int64List Source # 
                              Ord Int64List Source # 
                              Show Int64List Source # 
                              Message Int64List Source # 

                              Methods

                              descriptor :: MessageDescriptor Int64List

                              Default Int64List Source # 

                              Methods

                              def :: Int64List

                              ((~) * a [Int64], (~) * b [Int64], Functor f) => HasLens "value" f Int64List Int64List a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> Int64List -> f Int64List

                              bytesList :: forall f s t a b. HasLens "bytesList" f s t a b => LensLike f s t a b Source #

                              feature :: forall f s t a b. HasLens "feature" f s t a b => LensLike f s t a b Source #

                              featureList :: forall f s t a b. HasLens "featureList" f s t a b => LensLike f s t a b Source #

                              floatList :: forall f s t a b. HasLens "floatList" f s t a b => LensLike f s t a b Source #

                              int64List :: forall f s t a b. HasLens "int64List" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              maybe'bytesList :: forall f s t a b. HasLens "maybe'bytesList" f s t a b => LensLike f s t a b Source #

                              maybe'floatList :: forall f s t a b. HasLens "maybe'floatList" f s t a b => LensLike f s t a b Source #

                              maybe'int64List :: forall f s t a b. HasLens "maybe'int64List" f s t a b => LensLike f s t a b Source #

                              maybe'kind :: forall f s t a b. HasLens "maybe'kind" f s t a b => LensLike f s t a b Source #

                              maybe'value :: forall f s t a b. HasLens "maybe'value" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AllocationDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AllocationDescription.html new file mode 100644 index 0000000..46bf0f6 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AllocationDescription.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.AllocationDescription

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.AllocationDescription

                              Documentation

                              data AllocationDescription Source #

                              Instances

                              Eq AllocationDescription Source # 
                              Ord AllocationDescription Source # 
                              Show AllocationDescription Source # 
                              Message AllocationDescription Source # 

                              Methods

                              descriptor :: MessageDescriptor AllocationDescription

                              Default AllocationDescription Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "allocatedBytes" f AllocationDescription AllocationDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocatedBytes" -> (a -> f b) -> AllocationDescription -> f AllocationDescription

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "allocationId" f AllocationDescription AllocationDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocationId" -> (a -> f b) -> AllocationDescription -> f AllocationDescription

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "allocatorName" f AllocationDescription AllocationDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocatorName" -> (a -> f b) -> AllocationDescription -> f AllocationDescription

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "hasSingleReference" f AllocationDescription AllocationDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hasSingleReference" -> (a -> f b) -> AllocationDescription -> f AllocationDescription

                              ((~) * a Word64, (~) * b Word64, Functor f) => HasLens "ptr" f AllocationDescription AllocationDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "ptr" -> (a -> f b) -> AllocationDescription -> f AllocationDescription

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "requestedBytes" f AllocationDescription AllocationDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "requestedBytes" -> (a -> f b) -> AllocationDescription -> f AllocationDescription

                              allocatedBytes :: forall f s t a b. HasLens "allocatedBytes" f s t a b => LensLike f s t a b Source #

                              allocationId :: forall f s t a b. HasLens "allocationId" f s t a b => LensLike f s t a b Source #

                              allocatorName :: forall f s t a b. HasLens "allocatorName" f s t a b => LensLike f s t a b Source #

                              hasSingleReference :: forall f s t a b. HasLens "hasSingleReference" f s t a b => LensLike f s t a b Source #

                              ptr :: forall f s t a b. HasLens "ptr" f s t a b => LensLike f s t a b Source #

                              requestedBytes :: forall f s t a b. HasLens "requestedBytes" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html index d61dd5f..d85ae36 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.AttrValue

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.AttrValue

                              Documentation

                              data AttrValue

                              Instances

                              Eq AttrValue 
                              Show AttrValue 
                              Message AttrValue 
                              Default AttrValue 
                              HasField "b" AttrValue AttrValue 
                              HasField "f" AttrValue AttrValue 
                              HasField "func" AttrValue AttrValue 
                              HasField "i" AttrValue AttrValue 
                              HasField "list" AttrValue AttrValue 
                              HasField "maybe'b" AttrValue AttrValue 
                              HasField "maybe'f" AttrValue AttrValue 
                              HasField "maybe'func" AttrValue AttrValue 
                              HasField "maybe'i" AttrValue AttrValue 
                              HasField "maybe'list" AttrValue AttrValue 
                              HasField "maybe'placeholder" AttrValue AttrValue 
                              HasField "maybe's" AttrValue AttrValue 
                              HasField "maybe'shape" AttrValue AttrValue 
                              HasField "maybe'tensor" AttrValue AttrValue 
                              HasField "maybe'type'" AttrValue AttrValue 
                              HasField "placeholder" AttrValue AttrValue 
                              HasField "s" AttrValue AttrValue 
                              HasField "shape" AttrValue AttrValue 
                              HasField "tensor" AttrValue AttrValue 
                              HasField "type'" AttrValue AttrValue 
                              type Field "b" AttrValue = Bool 
                              type Field "f" AttrValue = Float 
                              type Field "func" AttrValue = NameAttrList 
                              type Field "i" AttrValue = Int64 
                              type Field "list" AttrValue = AttrValue'ListValue 
                              type Field "maybe'b" AttrValue = Maybe Bool 
                              type Field "maybe'f" AttrValue = Maybe Float 
                              type Field "maybe'func" AttrValue = Maybe NameAttrList 
                              type Field "maybe'i" AttrValue = Maybe Int64 
                              type Field "maybe'list" AttrValue = Maybe AttrValue'ListValue 
                              type Field "maybe'placeholder" AttrValue = Maybe Text 
                              type Field "maybe's" AttrValue = Maybe ByteString 
                              type Field "maybe'shape" AttrValue = Maybe TensorShapeProto 
                              type Field "maybe'tensor" AttrValue = Maybe TensorProto 
                              type Field "maybe'type'" AttrValue = Maybe DataType 
                              type Field "placeholder" AttrValue = Text 
                              type Field "s" AttrValue = ByteString 
                              type Field "shape" AttrValue = TensorShapeProto 
                              type Field "tensor" AttrValue = TensorProto 
                              type Field "type'" AttrValue = DataType 

                              data NameAttrList

                              Constructors

                              NameAttrList 

                              Fields

                              _NameAttrList'name :: !Text
                               
                              _NameAttrList'attr :: !(Map Text AttrValue)
                               

                              Instances

                              Eq NameAttrList 
                              Show NameAttrList 
                              Message NameAttrList 
                              Default NameAttrList 
                              HasField "attr" NameAttrList NameAttrList 
                              HasField "name" NameAttrList NameAttrList 
                              type Field "attr" NameAttrList = Map Text AttrValue 
                              type Field "name" NameAttrList = Text 

                              attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg')

                              b :: forall msg msg'. HasField "b" msg msg' => Lens msg msg' (Field "b" msg) (Field "b" msg')

                              f :: forall msg msg'. HasField "f" msg msg' => Lens msg msg' (Field "f" msg) (Field "f" msg')

                              func :: forall msg msg'. HasField "func" msg msg' => Lens msg msg' (Field "func" msg) (Field "func" msg')

                              i :: forall msg msg'. HasField "i" msg msg' => Lens msg msg' (Field "i" msg) (Field "i" msg')

                              key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg')

                              list :: forall msg msg'. HasField "list" msg msg' => Lens msg msg' (Field "list" msg) (Field "list" msg')

                              maybe'b :: forall msg msg'. HasField "maybe'b" msg msg' => Lens msg msg' (Field "maybe'b" msg) (Field "maybe'b" msg')

                              maybe'f :: forall msg msg'. HasField "maybe'f" msg msg' => Lens msg msg' (Field "maybe'f" msg) (Field "maybe'f" msg')

                              maybe'func :: forall msg msg'. HasField "maybe'func" msg msg' => Lens msg msg' (Field "maybe'func" msg) (Field "maybe'func" msg')

                              maybe'i :: forall msg msg'. HasField "maybe'i" msg msg' => Lens msg msg' (Field "maybe'i" msg) (Field "maybe'i" msg')

                              maybe'list :: forall msg msg'. HasField "maybe'list" msg msg' => Lens msg msg' (Field "maybe'list" msg) (Field "maybe'list" msg')

                              maybe'placeholder :: forall msg msg'. HasField "maybe'placeholder" msg msg' => Lens msg msg' (Field "maybe'placeholder" msg) (Field "maybe'placeholder" msg')

                              maybe's :: forall msg msg'. HasField "maybe's" msg msg' => Lens msg msg' (Field "maybe's" msg) (Field "maybe's" msg')

                              maybe'shape :: forall msg msg'. HasField "maybe'shape" msg msg' => Lens msg msg' (Field "maybe'shape" msg) (Field "maybe'shape" msg')

                              maybe'tensor :: forall msg msg'. HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg')

                              maybe'type' :: forall msg msg'. HasField "maybe'type'" msg msg' => Lens msg msg' (Field "maybe'type'" msg) (Field "maybe'type'" msg')

                              maybe'value :: forall msg msg'. HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg')

                              name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

                              placeholder :: forall msg msg'. HasField "placeholder" msg msg' => Lens msg msg' (Field "placeholder" msg) (Field "placeholder" msg')

                              s :: forall msg msg'. HasField "s" msg msg' => Lens msg msg' (Field "s" msg) (Field "s" msg')

                              shape :: forall msg msg'. HasField "shape" msg msg' => Lens msg msg' (Field "shape" msg) (Field "shape" msg')

                              tensor :: forall msg msg'. HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg')

                              type' :: forall msg msg'. HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg')

                              value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.AttrValue

                              Documentation

                              data AttrValue Source #

                              Constructors

                              AttrValue 

                              Instances

                              Eq AttrValue Source # 
                              Ord AttrValue Source # 
                              Show AttrValue Source # 
                              Message AttrValue Source # 

                              Methods

                              descriptor :: MessageDescriptor AttrValue

                              Default AttrValue Source # 

                              Methods

                              def :: AttrValue

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "b" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "b" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a Float, (~) * b Float, Functor f) => HasLens "f" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "f" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a NameAttrList, (~) * b NameAttrList, Functor f) => HasLens "func" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "func" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "i" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "i" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a AttrValue'ListValue, (~) * b AttrValue'ListValue, Functor f) => HasLens "list" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "list" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe Bool), (~) * b (Maybe Bool), Functor f) => HasLens "maybe'b" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'b" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe Float), (~) * b (Maybe Float), Functor f) => HasLens "maybe'f" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'f" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe NameAttrList), (~) * b (Maybe NameAttrList), Functor f) => HasLens "maybe'func" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'func" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe Int64), (~) * b (Maybe Int64), Functor f) => HasLens "maybe'i" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'i" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe AttrValue'ListValue), (~) * b (Maybe AttrValue'ListValue), Functor f) => HasLens "maybe'list" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'list" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe Text), (~) * b (Maybe Text), Functor f) => HasLens "maybe'placeholder" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'placeholder" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe ByteString), (~) * b (Maybe ByteString), Functor f) => HasLens "maybe's" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe's" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe TensorShapeProto), (~) * b (Maybe TensorShapeProto), Functor f) => HasLens "maybe'shape" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'shape" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe TensorProto), (~) * b (Maybe TensorProto), Functor f) => HasLens "maybe'tensor" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensor" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe DataType), (~) * b (Maybe DataType), Functor f) => HasLens "maybe'type'" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'type'" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a (Maybe AttrValue'Value), (~) * b (Maybe AttrValue'Value), Functor f) => HasLens "maybe'value" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "placeholder" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "placeholder" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a ByteString, (~) * b ByteString, Functor f) => HasLens "s" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "s" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a TensorShapeProto, (~) * b TensorShapeProto, Functor f) => HasLens "shape" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "shape" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a TensorProto, (~) * b TensorProto, Functor f) => HasLens "tensor" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensor" -> (a -> f b) -> AttrValue -> f AttrValue

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "type'" f AttrValue AttrValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "type'" -> (a -> f b) -> AttrValue -> f AttrValue

                              data AttrValue'ListValue Source #

                              Instances

                              Eq AttrValue'ListValue Source # 
                              Ord AttrValue'ListValue Source # 
                              Show AttrValue'ListValue Source # 
                              Message AttrValue'ListValue Source # 

                              Methods

                              descriptor :: MessageDescriptor AttrValue'ListValue

                              Default AttrValue'ListValue Source # 
                              ((~) * a [Bool], (~) * b [Bool], Functor f) => HasLens "b" f AttrValue'ListValue AttrValue'ListValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "b" -> (a -> f b) -> AttrValue'ListValue -> f AttrValue'ListValue

                              ((~) * a [Float], (~) * b [Float], Functor f) => HasLens "f" f AttrValue'ListValue AttrValue'ListValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "f" -> (a -> f b) -> AttrValue'ListValue -> f AttrValue'ListValue

                              ((~) * a [NameAttrList], (~) * b [NameAttrList], Functor f) => HasLens "func" f AttrValue'ListValue AttrValue'ListValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "func" -> (a -> f b) -> AttrValue'ListValue -> f AttrValue'ListValue

                              ((~) * a [Int64], (~) * b [Int64], Functor f) => HasLens "i" f AttrValue'ListValue AttrValue'ListValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "i" -> (a -> f b) -> AttrValue'ListValue -> f AttrValue'ListValue

                              ((~) * a [ByteString], (~) * b [ByteString], Functor f) => HasLens "s" f AttrValue'ListValue AttrValue'ListValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "s" -> (a -> f b) -> AttrValue'ListValue -> f AttrValue'ListValue

                              ((~) * a [TensorShapeProto], (~) * b [TensorShapeProto], Functor f) => HasLens "shape" f AttrValue'ListValue AttrValue'ListValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "shape" -> (a -> f b) -> AttrValue'ListValue -> f AttrValue'ListValue

                              ((~) * a [TensorProto], (~) * b [TensorProto], Functor f) => HasLens "tensor" f AttrValue'ListValue AttrValue'ListValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensor" -> (a -> f b) -> AttrValue'ListValue -> f AttrValue'ListValue

                              ((~) * a [DataType], (~) * b [DataType], Functor f) => HasLens "type'" f AttrValue'ListValue AttrValue'ListValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "type'" -> (a -> f b) -> AttrValue'ListValue -> f AttrValue'ListValue

                              data NameAttrList'AttrEntry Source #

                              Instances

                              Eq NameAttrList'AttrEntry Source # 
                              Ord NameAttrList'AttrEntry Source # 
                              Show NameAttrList'AttrEntry Source # 
                              Message NameAttrList'AttrEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor NameAttrList'AttrEntry

                              Default NameAttrList'AttrEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f NameAttrList'AttrEntry NameAttrList'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "key" -> (a -> f b) -> NameAttrList'AttrEntry -> f NameAttrList'AttrEntry

                              ((~) * a (Maybe AttrValue), (~) * b (Maybe AttrValue), Functor f) => HasLens "maybe'value" f NameAttrList'AttrEntry NameAttrList'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> NameAttrList'AttrEntry -> f NameAttrList'AttrEntry

                              ((~) * a AttrValue, (~) * b AttrValue, Functor f) => HasLens "value" f NameAttrList'AttrEntry NameAttrList'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> NameAttrList'AttrEntry -> f NameAttrList'AttrEntry

                              attr :: forall f s t a b. HasLens "attr" f s t a b => LensLike f s t a b Source #

                              b :: forall f s t a b. HasLens "b" f s t a b => LensLike f s t a b Source #

                              f :: forall f s t a b. HasLens "f" f s t a b => LensLike f s t a b Source #

                              func :: forall f s t a b. HasLens "func" f s t a b => LensLike f s t a b Source #

                              i :: forall f s t a b. HasLens "i" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              list :: forall f s t a b. HasLens "list" f s t a b => LensLike f s t a b Source #

                              maybe'b :: forall f s t a b. HasLens "maybe'b" f s t a b => LensLike f s t a b Source #

                              maybe'f :: forall f s t a b. HasLens "maybe'f" f s t a b => LensLike f s t a b Source #

                              maybe'func :: forall f s t a b. HasLens "maybe'func" f s t a b => LensLike f s t a b Source #

                              maybe'i :: forall f s t a b. HasLens "maybe'i" f s t a b => LensLike f s t a b Source #

                              maybe'list :: forall f s t a b. HasLens "maybe'list" f s t a b => LensLike f s t a b Source #

                              maybe'placeholder :: forall f s t a b. HasLens "maybe'placeholder" f s t a b => LensLike f s t a b Source #

                              maybe's :: forall f s t a b. HasLens "maybe's" f s t a b => LensLike f s t a b Source #

                              maybe'shape :: forall f s t a b. HasLens "maybe'shape" f s t a b => LensLike f s t a b Source #

                              maybe'tensor :: forall f s t a b. HasLens "maybe'tensor" f s t a b => LensLike f s t a b Source #

                              maybe'type' :: forall f s t a b. HasLens "maybe'type'" f s t a b => LensLike f s t a b Source #

                              maybe'value :: forall f s t a b. HasLens "maybe'value" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              placeholder :: forall f s t a b. HasLens "placeholder" f s t a b => LensLike f s t a b Source #

                              s :: forall f s t a b. HasLens "s" f s t a b => LensLike f s t a b Source #

                              shape :: forall f s t a b. HasLens "shape" f s t a b => LensLike f s t a b Source #

                              tensor :: forall f s t a b. HasLens "tensor" f s t a b => LensLike f s t a b Source #

                              type' :: forall f s t a b. HasLens "type'" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-CostGraph.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-CostGraph.html new file mode 100644 index 0000000..38e0a4b --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-CostGraph.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.CostGraph

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.CostGraph

                              Documentation

                              data CostGraphDef'Node Source #

                              Instances

                              Eq CostGraphDef'Node Source # 
                              Ord CostGraphDef'Node Source # 
                              Show CostGraphDef'Node Source # 
                              Message CostGraphDef'Node Source # 

                              Methods

                              descriptor :: MessageDescriptor CostGraphDef'Node

                              Default CostGraphDef'Node Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "computeCost" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "computeCost" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "computeTime" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "computeTime" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a [Int32], (~) * b [Int32], Functor f) => HasLens "controlInput" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "controlInput" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "device" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "device" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "devicePersistentMemorySize" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "devicePersistentMemorySize" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "deviceTempMemorySize" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deviceTempMemorySize" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "hostPersistentMemorySize" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hostPersistentMemorySize" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "hostTempMemorySize" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hostTempMemorySize" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "id" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "id" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a [CostGraphDef'Node'InputInfo], (~) * b [CostGraphDef'Node'InputInfo], Functor f) => HasLens "inputInfo" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "inputInfo" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "isFinal" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "isFinal" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "memoryTime" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "memoryTime" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a [CostGraphDef'Node'OutputInfo], (~) * b [CostGraphDef'Node'OutputInfo], Functor f) => HasLens "outputInfo" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "outputInfo" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "temporaryMemorySize" f CostGraphDef'Node CostGraphDef'Node a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "temporaryMemorySize" -> (a -> f b) -> CostGraphDef'Node -> f CostGraphDef'Node

                              data CostGraphDef'Node'InputInfo Source #

                              Instances

                              Eq CostGraphDef'Node'InputInfo Source # 
                              Ord CostGraphDef'Node'InputInfo Source # 
                              Show CostGraphDef'Node'InputInfo Source # 
                              Message CostGraphDef'Node'InputInfo Source # 

                              Methods

                              descriptor :: MessageDescriptor CostGraphDef'Node'InputInfo

                              Default CostGraphDef'Node'InputInfo Source # 
                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "precedingNode" f CostGraphDef'Node'InputInfo CostGraphDef'Node'InputInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "precedingNode" -> (a -> f b) -> CostGraphDef'Node'InputInfo -> f CostGraphDef'Node'InputInfo

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "precedingPort" f CostGraphDef'Node'InputInfo CostGraphDef'Node'InputInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "precedingPort" -> (a -> f b) -> CostGraphDef'Node'InputInfo -> f CostGraphDef'Node'InputInfo

                              data CostGraphDef'Node'OutputInfo Source #

                              Instances

                              Eq CostGraphDef'Node'OutputInfo Source # 
                              Ord CostGraphDef'Node'OutputInfo Source # 
                              Show CostGraphDef'Node'OutputInfo Source # 
                              Message CostGraphDef'Node'OutputInfo Source # 

                              Methods

                              descriptor :: MessageDescriptor CostGraphDef'Node'OutputInfo

                              Default CostGraphDef'Node'OutputInfo Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "aliasInputPort" f CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "aliasInputPort" -> (a -> f b) -> CostGraphDef'Node'OutputInfo -> f CostGraphDef'Node'OutputInfo

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "dtype" f CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo a b Source # 
                              ((~) * a (Maybe TensorShapeProto), (~) * b (Maybe TensorShapeProto), Functor f) => HasLens "maybe'shape" f CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'shape" -> (a -> f b) -> CostGraphDef'Node'OutputInfo -> f CostGraphDef'Node'OutputInfo

                              ((~) * a TensorShapeProto, (~) * b TensorShapeProto, Functor f) => HasLens "shape" f CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo a b Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "size" f CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo a b Source # 

                              aliasInputPort :: forall f s t a b. HasLens "aliasInputPort" f s t a b => LensLike f s t a b Source #

                              computeCost :: forall f s t a b. HasLens "computeCost" f s t a b => LensLike f s t a b Source #

                              computeTime :: forall f s t a b. HasLens "computeTime" f s t a b => LensLike f s t a b Source #

                              controlInput :: forall f s t a b. HasLens "controlInput" f s t a b => LensLike f s t a b Source #

                              device :: forall f s t a b. HasLens "device" f s t a b => LensLike f s t a b Source #

                              devicePersistentMemorySize :: forall f s t a b. HasLens "devicePersistentMemorySize" f s t a b => LensLike f s t a b Source #

                              deviceTempMemorySize :: forall f s t a b. HasLens "deviceTempMemorySize" f s t a b => LensLike f s t a b Source #

                              dtype :: forall f s t a b. HasLens "dtype" f s t a b => LensLike f s t a b Source #

                              hostPersistentMemorySize :: forall f s t a b. HasLens "hostPersistentMemorySize" f s t a b => LensLike f s t a b Source #

                              hostTempMemorySize :: forall f s t a b. HasLens "hostTempMemorySize" f s t a b => LensLike f s t a b Source #

                              id :: forall f s t a b. HasLens "id" f s t a b => LensLike f s t a b Source #

                              inputInfo :: forall f s t a b. HasLens "inputInfo" f s t a b => LensLike f s t a b Source #

                              isFinal :: forall f s t a b. HasLens "isFinal" f s t a b => LensLike f s t a b Source #

                              maybe'shape :: forall f s t a b. HasLens "maybe'shape" f s t a b => LensLike f s t a b Source #

                              memoryTime :: forall f s t a b. HasLens "memoryTime" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              node :: forall f s t a b. HasLens "node" f s t a b => LensLike f s t a b Source #

                              outputInfo :: forall f s t a b. HasLens "outputInfo" f s t a b => LensLike f s t a b Source #

                              precedingNode :: forall f s t a b. HasLens "precedingNode" f s t a b => LensLike f s t a b Source #

                              precedingPort :: forall f s t a b. HasLens "precedingPort" f s t a b => LensLike f s t a b Source #

                              shape :: forall f s t a b. HasLens "shape" f s t a b => LensLike f s t a b Source #

                              size :: forall f s t a b. HasLens "size" f s t a b => LensLike f s t a b Source #

                              temporaryMemorySize :: forall f s t a b. HasLens "temporaryMemorySize" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-DeviceAttributes.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-DeviceAttributes.html new file mode 100644 index 0000000..fa1d7ee --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-DeviceAttributes.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.DeviceAttributes

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.DeviceAttributes

                              Documentation

                              data DeviceAttributes Source #

                              Instances

                              Eq DeviceAttributes Source # 
                              Ord DeviceAttributes Source # 
                              Show DeviceAttributes Source # 
                              Message DeviceAttributes Source # 

                              Methods

                              descriptor :: MessageDescriptor DeviceAttributes

                              Default DeviceAttributes Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "deviceType" f DeviceAttributes DeviceAttributes a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deviceType" -> (a -> f b) -> DeviceAttributes -> f DeviceAttributes

                              ((~) * a Word64, (~) * b Word64, Functor f) => HasLens "incarnation" f DeviceAttributes DeviceAttributes a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "incarnation" -> (a -> f b) -> DeviceAttributes -> f DeviceAttributes

                              ((~) * a DeviceLocality, (~) * b DeviceLocality, Functor f) => HasLens "locality" f DeviceAttributes DeviceAttributes a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "locality" -> (a -> f b) -> DeviceAttributes -> f DeviceAttributes

                              ((~) * a (Maybe DeviceLocality), (~) * b (Maybe DeviceLocality), Functor f) => HasLens "maybe'locality" f DeviceAttributes DeviceAttributes a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'locality" -> (a -> f b) -> DeviceAttributes -> f DeviceAttributes

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "memoryLimit" f DeviceAttributes DeviceAttributes a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "memoryLimit" -> (a -> f b) -> DeviceAttributes -> f DeviceAttributes

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f DeviceAttributes DeviceAttributes a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> DeviceAttributes -> f DeviceAttributes

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "physicalDeviceDesc" f DeviceAttributes DeviceAttributes a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "physicalDeviceDesc" -> (a -> f b) -> DeviceAttributes -> f DeviceAttributes

                              busId :: forall f s t a b. HasLens "busId" f s t a b => LensLike f s t a b Source #

                              deviceType :: forall f s t a b. HasLens "deviceType" f s t a b => LensLike f s t a b Source #

                              incarnation :: forall f s t a b. HasLens "incarnation" f s t a b => LensLike f s t a b Source #

                              locality :: forall f s t a b. HasLens "locality" f s t a b => LensLike f s t a b Source #

                              maybe'locality :: forall f s t a b. HasLens "maybe'locality" f s t a b => LensLike f s t a b Source #

                              memoryLimit :: forall f s t a b. HasLens "memoryLimit" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              physicalDeviceDesc :: forall f s t a b. HasLens "physicalDeviceDesc" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Function.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Function.html new file mode 100644 index 0000000..400a963 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Function.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Function

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Function

                              Documentation

                              data FunctionDef Source #

                              Instances

                              Eq FunctionDef Source # 
                              Ord FunctionDef Source # 
                              Show FunctionDef Source # 
                              Message FunctionDef Source # 

                              Methods

                              descriptor :: MessageDescriptor FunctionDef

                              Default FunctionDef Source # 

                              Methods

                              def :: FunctionDef

                              ((~) * a (Map Text AttrValue), (~) * b (Map Text AttrValue), Functor f) => HasLens "attr" f FunctionDef FunctionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "attr" -> (a -> f b) -> FunctionDef -> f FunctionDef

                              ((~) * a (Maybe OpDef), (~) * b (Maybe OpDef), Functor f) => HasLens "maybe'signature" f FunctionDef FunctionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'signature" -> (a -> f b) -> FunctionDef -> f FunctionDef

                              ((~) * a [NodeDef], (~) * b [NodeDef], Functor f) => HasLens "nodeDef" f FunctionDef FunctionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "nodeDef" -> (a -> f b) -> FunctionDef -> f FunctionDef

                              ((~) * a (Map Text Text), (~) * b (Map Text Text), Functor f) => HasLens "ret" f FunctionDef FunctionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "ret" -> (a -> f b) -> FunctionDef -> f FunctionDef

                              ((~) * a OpDef, (~) * b OpDef, Functor f) => HasLens "signature" f FunctionDef FunctionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "signature" -> (a -> f b) -> FunctionDef -> f FunctionDef

                              data FunctionDef'AttrEntry Source #

                              Instances

                              Eq FunctionDef'AttrEntry Source # 
                              Ord FunctionDef'AttrEntry Source # 
                              Show FunctionDef'AttrEntry Source # 
                              Message FunctionDef'AttrEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor FunctionDef'AttrEntry

                              Default FunctionDef'AttrEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f FunctionDef'AttrEntry FunctionDef'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "key" -> (a -> f b) -> FunctionDef'AttrEntry -> f FunctionDef'AttrEntry

                              ((~) * a (Maybe AttrValue), (~) * b (Maybe AttrValue), Functor f) => HasLens "maybe'value" f FunctionDef'AttrEntry FunctionDef'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> FunctionDef'AttrEntry -> f FunctionDef'AttrEntry

                              ((~) * a AttrValue, (~) * b AttrValue, Functor f) => HasLens "value" f FunctionDef'AttrEntry FunctionDef'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> FunctionDef'AttrEntry -> f FunctionDef'AttrEntry

                              data FunctionDef'RetEntry Source #

                              data FunctionDefLibrary Source #

                              data GradientDef Source #

                              Constructors

                              GradientDef 

                              Instances

                              Eq GradientDef Source # 
                              Ord GradientDef Source # 
                              Show GradientDef Source # 
                              Message GradientDef Source # 

                              Methods

                              descriptor :: MessageDescriptor GradientDef

                              Default GradientDef Source # 

                              Methods

                              def :: GradientDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "functionName" f GradientDef GradientDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "functionName" -> (a -> f b) -> GradientDef -> f GradientDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "gradientFunc" f GradientDef GradientDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "gradientFunc" -> (a -> f b) -> GradientDef -> f GradientDef

                              attr :: forall f s t a b. HasLens "attr" f s t a b => LensLike f s t a b Source #

                              function :: forall f s t a b. HasLens "function" f s t a b => LensLike f s t a b Source #

                              functionName :: forall f s t a b. HasLens "functionName" f s t a b => LensLike f s t a b Source #

                              gradient :: forall f s t a b. HasLens "gradient" f s t a b => LensLike f s t a b Source #

                              gradientFunc :: forall f s t a b. HasLens "gradientFunc" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              maybe'signature :: forall f s t a b. HasLens "maybe'signature" f s t a b => LensLike f s t a b Source #

                              maybe'value :: forall f s t a b. HasLens "maybe'value" f s t a b => LensLike f s t a b Source #

                              nodeDef :: forall f s t a b. HasLens "nodeDef" f s t a b => LensLike f s t a b Source #

                              ret :: forall f s t a b. HasLens "ret" f s t a b => LensLike f s t a b Source #

                              signature :: forall f s t a b. HasLens "signature" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html index 31b7974..c0edcc0 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.Graph

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Graph

                              Documentation

                              data GraphDef

                              Constructors

                              GraphDef 

                              Fields

                              _GraphDef'node :: ![NodeDef]
                               
                              _GraphDef'versions :: !(Maybe VersionDef)
                               
                              _GraphDef'version :: !Int32
                               
                              _GraphDef'library :: !(Maybe FunctionDefLibrary)
                               

                              Instances

                              Eq GraphDef 
                              Show GraphDef 
                              Message GraphDef 
                              Default GraphDef 
                              HasField "library" GraphDef GraphDef 
                              HasField "maybe'library" GraphDef GraphDef 
                              HasField "maybe'versions" GraphDef GraphDef 
                              HasField "node" GraphDef GraphDef 
                              HasField "version" GraphDef GraphDef 
                              HasField "versions" GraphDef GraphDef 
                              type Field "library" GraphDef 
                              type Field "maybe'library" GraphDef 
                              type Field "maybe'versions" GraphDef 
                              type Field "node" GraphDef = [NodeDef] 
                              type Field "version" GraphDef = Int32 
                              type Field "versions" GraphDef 

                              library :: forall msg msg'. HasField "library" msg msg' => Lens msg msg' (Field "library" msg) (Field "library" msg')

                              maybe'library :: forall msg msg'. HasField "maybe'library" msg msg' => Lens msg msg' (Field "maybe'library" msg) (Field "maybe'library" msg')

                              maybe'versions :: forall msg msg'. HasField "maybe'versions" msg msg' => Lens msg msg' (Field "maybe'versions" msg) (Field "maybe'versions" msg')

                              node :: forall msg msg'. HasField "node" msg msg' => Lens msg msg' (Field "node" msg) (Field "node" msg')

                              version :: forall msg msg'. HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg')

                              versions :: forall msg msg'. HasField "versions" msg msg' => Lens msg msg' (Field "versions" msg) (Field "versions" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Graph

                              Documentation

                              data GraphDef Source #

                              Instances

                              Eq GraphDef Source # 
                              Ord GraphDef Source # 
                              Show GraphDef Source # 
                              Message GraphDef Source # 

                              Methods

                              descriptor :: MessageDescriptor GraphDef

                              Default GraphDef Source # 

                              Methods

                              def :: GraphDef

                              ((~) * a FunctionDefLibrary, (~) * b FunctionDefLibrary, Functor f) => HasLens "library" f GraphDef GraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "library" -> (a -> f b) -> GraphDef -> f GraphDef

                              ((~) * a (Maybe FunctionDefLibrary), (~) * b (Maybe FunctionDefLibrary), Functor f) => HasLens "maybe'library" f GraphDef GraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'library" -> (a -> f b) -> GraphDef -> f GraphDef

                              ((~) * a (Maybe VersionDef), (~) * b (Maybe VersionDef), Functor f) => HasLens "maybe'versions" f GraphDef GraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'versions" -> (a -> f b) -> GraphDef -> f GraphDef

                              ((~) * a [NodeDef], (~) * b [NodeDef], Functor f) => HasLens "node" f GraphDef GraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "node" -> (a -> f b) -> GraphDef -> f GraphDef

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "version" f GraphDef GraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "version" -> (a -> f b) -> GraphDef -> f GraphDef

                              ((~) * a VersionDef, (~) * b VersionDef, Functor f) => HasLens "versions" f GraphDef GraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "versions" -> (a -> f b) -> GraphDef -> f GraphDef

                              library :: forall f s t a b. HasLens "library" f s t a b => LensLike f s t a b Source #

                              maybe'library :: forall f s t a b. HasLens "maybe'library" f s t a b => LensLike f s t a b Source #

                              maybe'versions :: forall f s t a b. HasLens "maybe'versions" f s t a b => LensLike f s t a b Source #

                              node :: forall f s t a b. HasLens "node" f s t a b => LensLike f s t a b Source #

                              version :: forall f s t a b. HasLens "version" f s t a b => LensLike f s t a b Source #

                              versions :: forall f s t a b. HasLens "versions" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-KernelDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-KernelDef.html new file mode 100644 index 0000000..90cf5be --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-KernelDef.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.KernelDef

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.KernelDef

                              Documentation

                              data KernelDef Source #

                              Instances

                              Eq KernelDef Source # 
                              Ord KernelDef Source # 
                              Show KernelDef Source # 
                              Message KernelDef Source # 

                              Methods

                              descriptor :: MessageDescriptor KernelDef

                              Default KernelDef Source # 

                              Methods

                              def :: KernelDef

                              ((~) * a [KernelDef'AttrConstraint], (~) * b [KernelDef'AttrConstraint], Functor f) => HasLens "constraint" f KernelDef KernelDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "constraint" -> (a -> f b) -> KernelDef -> f KernelDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "deviceType" f KernelDef KernelDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deviceType" -> (a -> f b) -> KernelDef -> f KernelDef

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "hostMemoryArg" f KernelDef KernelDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hostMemoryArg" -> (a -> f b) -> KernelDef -> f KernelDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "label" f KernelDef KernelDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "label" -> (a -> f b) -> KernelDef -> f KernelDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "op" f KernelDef KernelDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "op" -> (a -> f b) -> KernelDef -> f KernelDef

                              data KernelDef'AttrConstraint Source #

                              Instances

                              Eq KernelDef'AttrConstraint Source # 
                              Ord KernelDef'AttrConstraint Source # 
                              Show KernelDef'AttrConstraint Source # 
                              Message KernelDef'AttrConstraint Source # 

                              Methods

                              descriptor :: MessageDescriptor KernelDef'AttrConstraint

                              Default KernelDef'AttrConstraint Source # 
                              ((~) * a AttrValue, (~) * b AttrValue, Functor f) => HasLens "allowedValues" f KernelDef'AttrConstraint KernelDef'AttrConstraint a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allowedValues" -> (a -> f b) -> KernelDef'AttrConstraint -> f KernelDef'AttrConstraint

                              ((~) * a (Maybe AttrValue), (~) * b (Maybe AttrValue), Functor f) => HasLens "maybe'allowedValues" f KernelDef'AttrConstraint KernelDef'AttrConstraint a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'allowedValues" -> (a -> f b) -> KernelDef'AttrConstraint -> f KernelDef'AttrConstraint

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f KernelDef'AttrConstraint KernelDef'AttrConstraint a b Source # 

                              allowedValues :: forall f s t a b. HasLens "allowedValues" f s t a b => LensLike f s t a b Source #

                              constraint :: forall f s t a b. HasLens "constraint" f s t a b => LensLike f s t a b Source #

                              deviceType :: forall f s t a b. HasLens "deviceType" f s t a b => LensLike f s t a b Source #

                              hostMemoryArg :: forall f s t a b. HasLens "hostMemoryArg" f s t a b => LensLike f s t a b Source #

                              label :: forall f s t a b. HasLens "label" f s t a b => LensLike f s t a b Source #

                              maybe'allowedValues :: forall f s t a b. HasLens "maybe'allowedValues" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              op :: forall f s t a b. HasLens "op" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-LogMemory.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-LogMemory.html new file mode 100644 index 0000000..d9278c7 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-LogMemory.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.LogMemory

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.LogMemory

                              Documentation

                              data MemoryLogRawAllocation Source #

                              Instances

                              Eq MemoryLogRawAllocation Source # 
                              Ord MemoryLogRawAllocation Source # 
                              Show MemoryLogRawAllocation Source # 
                              Message MemoryLogRawAllocation Source # 

                              Methods

                              descriptor :: MessageDescriptor MemoryLogRawAllocation

                              Default MemoryLogRawAllocation Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "allocationId" f MemoryLogRawAllocation MemoryLogRawAllocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocationId" -> (a -> f b) -> MemoryLogRawAllocation -> f MemoryLogRawAllocation

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "allocatorName" f MemoryLogRawAllocation MemoryLogRawAllocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocatorName" -> (a -> f b) -> MemoryLogRawAllocation -> f MemoryLogRawAllocation

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "numBytes" f MemoryLogRawAllocation MemoryLogRawAllocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "numBytes" -> (a -> f b) -> MemoryLogRawAllocation -> f MemoryLogRawAllocation

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "operation" f MemoryLogRawAllocation MemoryLogRawAllocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "operation" -> (a -> f b) -> MemoryLogRawAllocation -> f MemoryLogRawAllocation

                              ((~) * a Word64, (~) * b Word64, Functor f) => HasLens "ptr" f MemoryLogRawAllocation MemoryLogRawAllocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "ptr" -> (a -> f b) -> MemoryLogRawAllocation -> f MemoryLogRawAllocation

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "stepId" f MemoryLogRawAllocation MemoryLogRawAllocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "stepId" -> (a -> f b) -> MemoryLogRawAllocation -> f MemoryLogRawAllocation

                              data MemoryLogRawDeallocation Source #

                              Instances

                              Eq MemoryLogRawDeallocation Source # 
                              Ord MemoryLogRawDeallocation Source # 
                              Show MemoryLogRawDeallocation Source # 
                              Message MemoryLogRawDeallocation Source # 

                              Methods

                              descriptor :: MessageDescriptor MemoryLogRawDeallocation

                              Default MemoryLogRawDeallocation Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "allocationId" f MemoryLogRawDeallocation MemoryLogRawDeallocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocationId" -> (a -> f b) -> MemoryLogRawDeallocation -> f MemoryLogRawDeallocation

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "allocatorName" f MemoryLogRawDeallocation MemoryLogRawDeallocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocatorName" -> (a -> f b) -> MemoryLogRawDeallocation -> f MemoryLogRawDeallocation

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "deferred" f MemoryLogRawDeallocation MemoryLogRawDeallocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deferred" -> (a -> f b) -> MemoryLogRawDeallocation -> f MemoryLogRawDeallocation

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "operation" f MemoryLogRawDeallocation MemoryLogRawDeallocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "operation" -> (a -> f b) -> MemoryLogRawDeallocation -> f MemoryLogRawDeallocation

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "stepId" f MemoryLogRawDeallocation MemoryLogRawDeallocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "stepId" -> (a -> f b) -> MemoryLogRawDeallocation -> f MemoryLogRawDeallocation

                              data MemoryLogTensorAllocation Source #

                              Instances

                              Eq MemoryLogTensorAllocation Source # 
                              Ord MemoryLogTensorAllocation Source # 
                              Show MemoryLogTensorAllocation Source # 
                              Message MemoryLogTensorAllocation Source # 

                              Methods

                              descriptor :: MessageDescriptor MemoryLogTensorAllocation

                              Default MemoryLogTensorAllocation Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "kernelName" f MemoryLogTensorAllocation MemoryLogTensorAllocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "kernelName" -> (a -> f b) -> MemoryLogTensorAllocation -> f MemoryLogTensorAllocation

                              ((~) * a (Maybe TensorDescription), (~) * b (Maybe TensorDescription), Functor f) => HasLens "maybe'tensor" f MemoryLogTensorAllocation MemoryLogTensorAllocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensor" -> (a -> f b) -> MemoryLogTensorAllocation -> f MemoryLogTensorAllocation

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "stepId" f MemoryLogTensorAllocation MemoryLogTensorAllocation a b Source # 
                              ((~) * a TensorDescription, (~) * b TensorDescription, Functor f) => HasLens "tensor" f MemoryLogTensorAllocation MemoryLogTensorAllocation a b Source # 

                              data MemoryLogTensorDeallocation Source #

                              Instances

                              Eq MemoryLogTensorDeallocation Source # 
                              Ord MemoryLogTensorDeallocation Source # 
                              Show MemoryLogTensorDeallocation Source # 
                              Message MemoryLogTensorDeallocation Source # 

                              Methods

                              descriptor :: MessageDescriptor MemoryLogTensorDeallocation

                              Default MemoryLogTensorDeallocation Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "allocationId" f MemoryLogTensorDeallocation MemoryLogTensorDeallocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocationId" -> (a -> f b) -> MemoryLogTensorDeallocation -> f MemoryLogTensorDeallocation

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "allocatorName" f MemoryLogTensorDeallocation MemoryLogTensorDeallocation a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocatorName" -> (a -> f b) -> MemoryLogTensorDeallocation -> f MemoryLogTensorDeallocation

                              data MemoryLogTensorOutput Source #

                              Instances

                              Eq MemoryLogTensorOutput Source # 
                              Ord MemoryLogTensorOutput Source # 
                              Show MemoryLogTensorOutput Source # 
                              Message MemoryLogTensorOutput Source # 

                              Methods

                              descriptor :: MessageDescriptor MemoryLogTensorOutput

                              Default MemoryLogTensorOutput Source # 
                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "index" f MemoryLogTensorOutput MemoryLogTensorOutput a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "index" -> (a -> f b) -> MemoryLogTensorOutput -> f MemoryLogTensorOutput

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "kernelName" f MemoryLogTensorOutput MemoryLogTensorOutput a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "kernelName" -> (a -> f b) -> MemoryLogTensorOutput -> f MemoryLogTensorOutput

                              ((~) * a (Maybe TensorDescription), (~) * b (Maybe TensorDescription), Functor f) => HasLens "maybe'tensor" f MemoryLogTensorOutput MemoryLogTensorOutput a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensor" -> (a -> f b) -> MemoryLogTensorOutput -> f MemoryLogTensorOutput

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "stepId" f MemoryLogTensorOutput MemoryLogTensorOutput a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "stepId" -> (a -> f b) -> MemoryLogTensorOutput -> f MemoryLogTensorOutput

                              ((~) * a TensorDescription, (~) * b TensorDescription, Functor f) => HasLens "tensor" f MemoryLogTensorOutput MemoryLogTensorOutput a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensor" -> (a -> f b) -> MemoryLogTensorOutput -> f MemoryLogTensorOutput

                              allocationId :: forall f s t a b. HasLens "allocationId" f s t a b => LensLike f s t a b Source #

                              allocatorName :: forall f s t a b. HasLens "allocatorName" f s t a b => LensLike f s t a b Source #

                              deferred :: forall f s t a b. HasLens "deferred" f s t a b => LensLike f s t a b Source #

                              handle :: forall f s t a b. HasLens "handle" f s t a b => LensLike f s t a b Source #

                              index :: forall f s t a b. HasLens "index" f s t a b => LensLike f s t a b Source #

                              kernelName :: forall f s t a b. HasLens "kernelName" f s t a b => LensLike f s t a b Source #

                              maybe'tensor :: forall f s t a b. HasLens "maybe'tensor" f s t a b => LensLike f s t a b Source #

                              numBytes :: forall f s t a b. HasLens "numBytes" f s t a b => LensLike f s t a b Source #

                              operation :: forall f s t a b. HasLens "operation" f s t a b => LensLike f s t a b Source #

                              ptr :: forall f s t a b. HasLens "ptr" f s t a b => LensLike f s t a b Source #

                              stepId :: forall f s t a b. HasLens "stepId" f s t a b => LensLike f s t a b Source #

                              tensor :: forall f s t a b. HasLens "tensor" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html index 8f51107..456bff4 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.NodeDef

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.NodeDef

                              Documentation

                              data NodeDef

                              Constructors

                              NodeDef 

                              Fields

                              _NodeDef'name :: !Text
                               
                              _NodeDef'op :: !Text
                               
                              _NodeDef'input :: ![Text]
                               
                              _NodeDef'device :: !Text
                               
                              _NodeDef'attr :: !(Map Text AttrValue)
                               

                              Instances

                              Eq NodeDef 
                              Show NodeDef 
                              Message NodeDef 
                              Default NodeDef 
                              HasField "attr" NodeDef NodeDef 
                              HasField "device" NodeDef NodeDef 
                              HasField "input" NodeDef NodeDef 
                              HasField "name" NodeDef NodeDef 
                              HasField "op" NodeDef NodeDef 
                              type Field "attr" NodeDef = Map Text AttrValue 
                              type Field "device" NodeDef = Text 
                              type Field "input" NodeDef = [Text] 
                              type Field "name" NodeDef = Text 
                              type Field "op" NodeDef = Text 

                              attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg')

                              device :: forall msg msg'. HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg')

                              input :: forall msg msg'. HasField "input" msg msg' => Lens msg msg' (Field "input" msg) (Field "input" msg')

                              key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg')

                              maybe'value :: forall msg msg'. HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg')

                              name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

                              op :: forall msg msg'. HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg')

                              value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.NodeDef

                              Documentation

                              data NodeDef Source #

                              Constructors

                              NodeDef 

                              Fields

                              Instances

                              Eq NodeDef Source # 

                              Methods

                              (==) :: NodeDef -> NodeDef -> Bool #

                              (/=) :: NodeDef -> NodeDef -> Bool #

                              Ord NodeDef Source # 
                              Show NodeDef Source # 
                              Message NodeDef Source # 

                              Methods

                              descriptor :: MessageDescriptor NodeDef

                              Default NodeDef Source # 

                              Methods

                              def :: NodeDef

                              ((~) * a (Map Text AttrValue), (~) * b (Map Text AttrValue), Functor f) => HasLens "attr" f NodeDef NodeDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "attr" -> (a -> f b) -> NodeDef -> f NodeDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "device" f NodeDef NodeDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "device" -> (a -> f b) -> NodeDef -> f NodeDef

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "input" f NodeDef NodeDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "input" -> (a -> f b) -> NodeDef -> f NodeDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f NodeDef NodeDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> NodeDef -> f NodeDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "op" f NodeDef NodeDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "op" -> (a -> f b) -> NodeDef -> f NodeDef

                              data NodeDef'AttrEntry Source #

                              Instances

                              Eq NodeDef'AttrEntry Source # 
                              Ord NodeDef'AttrEntry Source # 
                              Show NodeDef'AttrEntry Source # 
                              Message NodeDef'AttrEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor NodeDef'AttrEntry

                              Default NodeDef'AttrEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f NodeDef'AttrEntry NodeDef'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "key" -> (a -> f b) -> NodeDef'AttrEntry -> f NodeDef'AttrEntry

                              ((~) * a (Maybe AttrValue), (~) * b (Maybe AttrValue), Functor f) => HasLens "maybe'value" f NodeDef'AttrEntry NodeDef'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> NodeDef'AttrEntry -> f NodeDef'AttrEntry

                              ((~) * a AttrValue, (~) * b AttrValue, Functor f) => HasLens "value" f NodeDef'AttrEntry NodeDef'AttrEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> NodeDef'AttrEntry -> f NodeDef'AttrEntry

                              attr :: forall f s t a b. HasLens "attr" f s t a b => LensLike f s t a b Source #

                              device :: forall f s t a b. HasLens "device" f s t a b => LensLike f s t a b Source #

                              input :: forall f s t a b. HasLens "input" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              maybe'value :: forall f s t a b. HasLens "maybe'value" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              op :: forall f s t a b. HasLens "op" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html index 8dce10d..1cb79e1 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.OpDef

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.OpDef

                              Documentation

                              data OpDef

                              Instances

                              Eq OpDef 
                              Show OpDef 
                              Message OpDef 
                              Default OpDef 
                              HasField "allowsUninitializedInput" OpDef OpDef 
                              HasField "attr" OpDef OpDef 
                              HasField "deprecation" OpDef OpDef 
                              HasField "description" OpDef OpDef 
                              HasField "inputArg" OpDef OpDef 
                              HasField "isAggregate" OpDef OpDef 
                              HasField "isCommutative" OpDef OpDef 
                              HasField "isStateful" OpDef OpDef 
                              HasField "maybe'deprecation" OpDef OpDef 
                              HasField "name" OpDef OpDef 
                              HasField "outputArg" OpDef OpDef 
                              HasField "summary" OpDef OpDef 
                              type Field "allowsUninitializedInput" OpDef = Bool 
                              type Field "attr" OpDef = [OpDef'AttrDef] 
                              type Field "deprecation" OpDef = OpDeprecation 
                              type Field "description" OpDef = Text 
                              type Field "inputArg" OpDef = [OpDef'ArgDef] 
                              type Field "isAggregate" OpDef = Bool 
                              type Field "isCommutative" OpDef = Bool 
                              type Field "isStateful" OpDef = Bool 
                              type Field "maybe'deprecation" OpDef = Maybe OpDeprecation 
                              type Field "name" OpDef = Text 
                              type Field "outputArg" OpDef = [OpDef'ArgDef] 
                              type Field "summary" OpDef = Text 

                              data OpDef'ArgDef

                              Instances

                              Eq OpDef'ArgDef 
                              Show OpDef'ArgDef 
                              Message OpDef'ArgDef 
                              Default OpDef'ArgDef 
                              HasField "description" OpDef'ArgDef OpDef'ArgDef 
                              HasField "isRef" OpDef'ArgDef OpDef'ArgDef 
                              HasField "name" OpDef'ArgDef OpDef'ArgDef 
                              HasField "numberAttr" OpDef'ArgDef OpDef'ArgDef 
                              HasField "type'" OpDef'ArgDef OpDef'ArgDef 
                              HasField "typeAttr" OpDef'ArgDef OpDef'ArgDef 
                              HasField "typeListAttr" OpDef'ArgDef OpDef'ArgDef 
                              type Field "description" OpDef'ArgDef = Text 
                              type Field "isRef" OpDef'ArgDef = Bool 
                              type Field "name" OpDef'ArgDef = Text 
                              type Field "numberAttr" OpDef'ArgDef = Text 
                              type Field "type'" OpDef'ArgDef = DataType 
                              type Field "typeAttr" OpDef'ArgDef = Text 
                              type Field "typeListAttr" OpDef'ArgDef = Text 

                              data OpDef'AttrDef

                              Instances

                              Eq OpDef'AttrDef 
                              Show OpDef'AttrDef 
                              Message OpDef'AttrDef 
                              Default OpDef'AttrDef 
                              HasField "allowedValues" OpDef'AttrDef OpDef'AttrDef 
                              HasField "defaultValue" OpDef'AttrDef OpDef'AttrDef 
                              HasField "description" OpDef'AttrDef OpDef'AttrDef 
                              HasField "hasMinimum" OpDef'AttrDef OpDef'AttrDef 
                              HasField "maybe'allowedValues" OpDef'AttrDef OpDef'AttrDef 
                              HasField "maybe'defaultValue" OpDef'AttrDef OpDef'AttrDef 
                              HasField "minimum" OpDef'AttrDef OpDef'AttrDef 
                              HasField "name" OpDef'AttrDef OpDef'AttrDef 
                              HasField "type'" OpDef'AttrDef OpDef'AttrDef 
                              type Field "allowedValues" OpDef'AttrDef = AttrValue 
                              type Field "defaultValue" OpDef'AttrDef = AttrValue 
                              type Field "description" OpDef'AttrDef = Text 
                              type Field "hasMinimum" OpDef'AttrDef = Bool 
                              type Field "maybe'allowedValues" OpDef'AttrDef = Maybe AttrValue 
                              type Field "maybe'defaultValue" OpDef'AttrDef = Maybe AttrValue 
                              type Field "minimum" OpDef'AttrDef = Int64 
                              type Field "name" OpDef'AttrDef = Text 
                              type Field "type'" OpDef'AttrDef = Text 

                              data OpDeprecation

                              Instances

                              Eq OpDeprecation 
                              Show OpDeprecation 
                              Message OpDeprecation 
                              Default OpDeprecation 
                              HasField "explanation" OpDeprecation OpDeprecation 
                              HasField "version" OpDeprecation OpDeprecation 
                              type Field "explanation" OpDeprecation = Text 
                              type Field "version" OpDeprecation = Int32 

                              data OpList

                              Constructors

                              OpList 

                              Fields

                              _OpList'op :: ![OpDef]
                               

                              Instances

                              Eq OpList 
                              Show OpList 
                              Message OpList 
                              Default OpList 
                              HasField "op" OpList OpList 
                              type Field "op" OpList = [OpDef] 

                              allowedValues :: forall msg msg'. HasField "allowedValues" msg msg' => Lens msg msg' (Field "allowedValues" msg) (Field "allowedValues" msg')

                              allowsUninitializedInput :: forall msg msg'. HasField "allowsUninitializedInput" msg msg' => Lens msg msg' (Field "allowsUninitializedInput" msg) (Field "allowsUninitializedInput" msg')

                              attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg')

                              defaultValue :: forall msg msg'. HasField "defaultValue" msg msg' => Lens msg msg' (Field "defaultValue" msg) (Field "defaultValue" msg')

                              deprecation :: forall msg msg'. HasField "deprecation" msg msg' => Lens msg msg' (Field "deprecation" msg) (Field "deprecation" msg')

                              description :: forall msg msg'. HasField "description" msg msg' => Lens msg msg' (Field "description" msg) (Field "description" msg')

                              explanation :: forall msg msg'. HasField "explanation" msg msg' => Lens msg msg' (Field "explanation" msg) (Field "explanation" msg')

                              hasMinimum :: forall msg msg'. HasField "hasMinimum" msg msg' => Lens msg msg' (Field "hasMinimum" msg) (Field "hasMinimum" msg')

                              inputArg :: forall msg msg'. HasField "inputArg" msg msg' => Lens msg msg' (Field "inputArg" msg) (Field "inputArg" msg')

                              isAggregate :: forall msg msg'. HasField "isAggregate" msg msg' => Lens msg msg' (Field "isAggregate" msg) (Field "isAggregate" msg')

                              isCommutative :: forall msg msg'. HasField "isCommutative" msg msg' => Lens msg msg' (Field "isCommutative" msg) (Field "isCommutative" msg')

                              isRef :: forall msg msg'. HasField "isRef" msg msg' => Lens msg msg' (Field "isRef" msg) (Field "isRef" msg')

                              isStateful :: forall msg msg'. HasField "isStateful" msg msg' => Lens msg msg' (Field "isStateful" msg) (Field "isStateful" msg')

                              maybe'allowedValues :: forall msg msg'. HasField "maybe'allowedValues" msg msg' => Lens msg msg' (Field "maybe'allowedValues" msg) (Field "maybe'allowedValues" msg')

                              maybe'defaultValue :: forall msg msg'. HasField "maybe'defaultValue" msg msg' => Lens msg msg' (Field "maybe'defaultValue" msg) (Field "maybe'defaultValue" msg')

                              maybe'deprecation :: forall msg msg'. HasField "maybe'deprecation" msg msg' => Lens msg msg' (Field "maybe'deprecation" msg) (Field "maybe'deprecation" msg')

                              minimum :: forall msg msg'. HasField "minimum" msg msg' => Lens msg msg' (Field "minimum" msg) (Field "minimum" msg')

                              name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

                              numberAttr :: forall msg msg'. HasField "numberAttr" msg msg' => Lens msg msg' (Field "numberAttr" msg) (Field "numberAttr" msg')

                              op :: forall msg msg'. HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg')

                              outputArg :: forall msg msg'. HasField "outputArg" msg msg' => Lens msg msg' (Field "outputArg" msg) (Field "outputArg" msg')

                              summary :: forall msg msg'. HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg')

                              type' :: forall msg msg'. HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg')

                              typeAttr :: forall msg msg'. HasField "typeAttr" msg msg' => Lens msg msg' (Field "typeAttr" msg) (Field "typeAttr" msg')

                              typeListAttr :: forall msg msg'. HasField "typeListAttr" msg msg' => Lens msg msg' (Field "typeListAttr" msg) (Field "typeListAttr" msg')

                              version :: forall msg msg'. HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.OpDef

                              Documentation

                              data OpDef Source #

                              Instances

                              Eq OpDef Source # 

                              Methods

                              (==) :: OpDef -> OpDef -> Bool #

                              (/=) :: OpDef -> OpDef -> Bool #

                              Ord OpDef Source # 

                              Methods

                              compare :: OpDef -> OpDef -> Ordering #

                              (<) :: OpDef -> OpDef -> Bool #

                              (<=) :: OpDef -> OpDef -> Bool #

                              (>) :: OpDef -> OpDef -> Bool #

                              (>=) :: OpDef -> OpDef -> Bool #

                              max :: OpDef -> OpDef -> OpDef #

                              min :: OpDef -> OpDef -> OpDef #

                              Show OpDef Source # 

                              Methods

                              showsPrec :: Int -> OpDef -> ShowS #

                              show :: OpDef -> String #

                              showList :: [OpDef] -> ShowS #

                              Message OpDef Source # 

                              Methods

                              descriptor :: MessageDescriptor OpDef

                              Default OpDef Source # 

                              Methods

                              def :: OpDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "allowsUninitializedInput" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allowsUninitializedInput" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a [OpDef'AttrDef], (~) * b [OpDef'AttrDef], Functor f) => HasLens "attr" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "attr" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a OpDeprecation, (~) * b OpDeprecation, Functor f) => HasLens "deprecation" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deprecation" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "description" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "description" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a [OpDef'ArgDef], (~) * b [OpDef'ArgDef], Functor f) => HasLens "inputArg" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "inputArg" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "isAggregate" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "isAggregate" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "isCommutative" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "isCommutative" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "isStateful" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "isStateful" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a (Maybe OpDeprecation), (~) * b (Maybe OpDeprecation), Functor f) => HasLens "maybe'deprecation" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'deprecation" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a [OpDef'ArgDef], (~) * b [OpDef'ArgDef], Functor f) => HasLens "outputArg" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "outputArg" -> (a -> f b) -> OpDef -> f OpDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "summary" f OpDef OpDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "summary" -> (a -> f b) -> OpDef -> f OpDef

                              data OpDef'ArgDef Source #

                              Instances

                              Eq OpDef'ArgDef Source # 
                              Ord OpDef'ArgDef Source # 
                              Show OpDef'ArgDef Source # 
                              Message OpDef'ArgDef Source # 

                              Methods

                              descriptor :: MessageDescriptor OpDef'ArgDef

                              Default OpDef'ArgDef Source # 

                              Methods

                              def :: OpDef'ArgDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "description" f OpDef'ArgDef OpDef'ArgDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "description" -> (a -> f b) -> OpDef'ArgDef -> f OpDef'ArgDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "isRef" f OpDef'ArgDef OpDef'ArgDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "isRef" -> (a -> f b) -> OpDef'ArgDef -> f OpDef'ArgDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f OpDef'ArgDef OpDef'ArgDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> OpDef'ArgDef -> f OpDef'ArgDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "numberAttr" f OpDef'ArgDef OpDef'ArgDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "numberAttr" -> (a -> f b) -> OpDef'ArgDef -> f OpDef'ArgDef

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "type'" f OpDef'ArgDef OpDef'ArgDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "type'" -> (a -> f b) -> OpDef'ArgDef -> f OpDef'ArgDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "typeAttr" f OpDef'ArgDef OpDef'ArgDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "typeAttr" -> (a -> f b) -> OpDef'ArgDef -> f OpDef'ArgDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "typeListAttr" f OpDef'ArgDef OpDef'ArgDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "typeListAttr" -> (a -> f b) -> OpDef'ArgDef -> f OpDef'ArgDef

                              data OpDef'AttrDef Source #

                              Instances

                              Eq OpDef'AttrDef Source # 
                              Ord OpDef'AttrDef Source # 
                              Show OpDef'AttrDef Source # 
                              Message OpDef'AttrDef Source # 

                              Methods

                              descriptor :: MessageDescriptor OpDef'AttrDef

                              Default OpDef'AttrDef Source # 

                              Methods

                              def :: OpDef'AttrDef

                              ((~) * a AttrValue, (~) * b AttrValue, Functor f) => HasLens "allowedValues" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allowedValues" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              ((~) * a AttrValue, (~) * b AttrValue, Functor f) => HasLens "defaultValue" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "defaultValue" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "description" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "description" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "hasMinimum" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hasMinimum" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              ((~) * a (Maybe AttrValue), (~) * b (Maybe AttrValue), Functor f) => HasLens "maybe'allowedValues" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'allowedValues" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              ((~) * a (Maybe AttrValue), (~) * b (Maybe AttrValue), Functor f) => HasLens "maybe'defaultValue" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'defaultValue" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "minimum" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "minimum" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "type'" f OpDef'AttrDef OpDef'AttrDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "type'" -> (a -> f b) -> OpDef'AttrDef -> f OpDef'AttrDef

                              data OpList Source #

                              Constructors

                              OpList 

                              Fields

                              Instances

                              Eq OpList Source # 

                              Methods

                              (==) :: OpList -> OpList -> Bool #

                              (/=) :: OpList -> OpList -> Bool #

                              Ord OpList Source # 
                              Show OpList Source # 
                              Message OpList Source # 

                              Methods

                              descriptor :: MessageDescriptor OpList

                              Default OpList Source # 

                              Methods

                              def :: OpList

                              ((~) * a [OpDef], (~) * b [OpDef], Functor f) => HasLens "op" f OpList OpList a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "op" -> (a -> f b) -> OpList -> f OpList

                              allowedValues :: forall f s t a b. HasLens "allowedValues" f s t a b => LensLike f s t a b Source #

                              allowsUninitializedInput :: forall f s t a b. HasLens "allowsUninitializedInput" f s t a b => LensLike f s t a b Source #

                              attr :: forall f s t a b. HasLens "attr" f s t a b => LensLike f s t a b Source #

                              defaultValue :: forall f s t a b. HasLens "defaultValue" f s t a b => LensLike f s t a b Source #

                              deprecation :: forall f s t a b. HasLens "deprecation" f s t a b => LensLike f s t a b Source #

                              description :: forall f s t a b. HasLens "description" f s t a b => LensLike f s t a b Source #

                              explanation :: forall f s t a b. HasLens "explanation" f s t a b => LensLike f s t a b Source #

                              hasMinimum :: forall f s t a b. HasLens "hasMinimum" f s t a b => LensLike f s t a b Source #

                              inputArg :: forall f s t a b. HasLens "inputArg" f s t a b => LensLike f s t a b Source #

                              isAggregate :: forall f s t a b. HasLens "isAggregate" f s t a b => LensLike f s t a b Source #

                              isCommutative :: forall f s t a b. HasLens "isCommutative" f s t a b => LensLike f s t a b Source #

                              isRef :: forall f s t a b. HasLens "isRef" f s t a b => LensLike f s t a b Source #

                              isStateful :: forall f s t a b. HasLens "isStateful" f s t a b => LensLike f s t a b Source #

                              maybe'allowedValues :: forall f s t a b. HasLens "maybe'allowedValues" f s t a b => LensLike f s t a b Source #

                              maybe'defaultValue :: forall f s t a b. HasLens "maybe'defaultValue" f s t a b => LensLike f s t a b Source #

                              maybe'deprecation :: forall f s t a b. HasLens "maybe'deprecation" f s t a b => LensLike f s t a b Source #

                              minimum :: forall f s t a b. HasLens "minimum" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              numberAttr :: forall f s t a b. HasLens "numberAttr" f s t a b => LensLike f s t a b Source #

                              op :: forall f s t a b. HasLens "op" f s t a b => LensLike f s t a b Source #

                              outputArg :: forall f s t a b. HasLens "outputArg" f s t a b => LensLike f s t a b Source #

                              summary :: forall f s t a b. HasLens "summary" f s t a b => LensLike f s t a b Source #

                              type' :: forall f s t a b. HasLens "type'" f s t a b => LensLike f s t a b Source #

                              typeAttr :: forall f s t a b. HasLens "typeAttr" f s t a b => LensLike f s t a b Source #

                              typeListAttr :: forall f s t a b. HasLens "typeListAttr" f s t a b => LensLike f s t a b Source #

                              version :: forall f s t a b. HasLens "version" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html index ab58a14..b97a160 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.ResourceHandle

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.ResourceHandle

                              Documentation

                              data ResourceHandle

                              Instances

                              Eq ResourceHandle 
                              Show ResourceHandle 
                              Message ResourceHandle 
                              Default ResourceHandle 
                              HasField "container" ResourceHandle ResourceHandle 
                              HasField "device" ResourceHandle ResourceHandle 
                              HasField "hashCode" ResourceHandle ResourceHandle 
                              HasField "maybeTypeName" ResourceHandle ResourceHandle 
                              HasField "name" ResourceHandle ResourceHandle 
                              type Field "container" ResourceHandle = Text 
                              type Field "device" ResourceHandle = Text 
                              type Field "hashCode" ResourceHandle = Word64 
                              type Field "maybeTypeName" ResourceHandle = Text 
                              type Field "name" ResourceHandle = Text 

                              container :: forall msg msg'. HasField "container" msg msg' => Lens msg msg' (Field "container" msg) (Field "container" msg')

                              device :: forall msg msg'. HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg')

                              hashCode :: forall msg msg'. HasField "hashCode" msg msg' => Lens msg msg' (Field "hashCode" msg) (Field "hashCode" msg')

                              maybeTypeName :: forall msg msg'. HasField "maybeTypeName" msg msg' => Lens msg msg' (Field "maybeTypeName" msg) (Field "maybeTypeName" msg')

                              name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.ResourceHandle

                              Documentation

                              data ResourceHandleProto Source #

                              Instances

                              Eq ResourceHandleProto Source # 
                              Ord ResourceHandleProto Source # 
                              Show ResourceHandleProto Source # 
                              Message ResourceHandleProto Source # 

                              Methods

                              descriptor :: MessageDescriptor ResourceHandleProto

                              Default ResourceHandleProto Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "container" f ResourceHandleProto ResourceHandleProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "container" -> (a -> f b) -> ResourceHandleProto -> f ResourceHandleProto

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "device" f ResourceHandleProto ResourceHandleProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "device" -> (a -> f b) -> ResourceHandleProto -> f ResourceHandleProto

                              ((~) * a Word64, (~) * b Word64, Functor f) => HasLens "hashCode" f ResourceHandleProto ResourceHandleProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hashCode" -> (a -> f b) -> ResourceHandleProto -> f ResourceHandleProto

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "maybeTypeName" f ResourceHandleProto ResourceHandleProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybeTypeName" -> (a -> f b) -> ResourceHandleProto -> f ResourceHandleProto

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f ResourceHandleProto ResourceHandleProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> ResourceHandleProto -> f ResourceHandleProto

                              container :: forall f s t a b. HasLens "container" f s t a b => LensLike f s t a b Source #

                              device :: forall f s t a b. HasLens "device" f s t a b => LensLike f s t a b Source #

                              hashCode :: forall f s t a b. HasLens "hashCode" f s t a b => LensLike f s t a b Source #

                              maybeTypeName :: forall f s t a b. HasLens "maybeTypeName" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-StepStats.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-StepStats.html new file mode 100644 index 0000000..a0e129b --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-StepStats.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.StepStats

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.StepStats

                              Documentation

                              data AllocatorMemoryUsed Source #

                              Instances

                              Eq AllocatorMemoryUsed Source # 
                              Ord AllocatorMemoryUsed Source # 
                              Show AllocatorMemoryUsed Source # 
                              Message AllocatorMemoryUsed Source # 

                              Methods

                              descriptor :: MessageDescriptor AllocatorMemoryUsed

                              Default AllocatorMemoryUsed Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "allocatorBytesInUse" f AllocatorMemoryUsed AllocatorMemoryUsed a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocatorBytesInUse" -> (a -> f b) -> AllocatorMemoryUsed -> f AllocatorMemoryUsed

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "allocatorName" f AllocatorMemoryUsed AllocatorMemoryUsed a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocatorName" -> (a -> f b) -> AllocatorMemoryUsed -> f AllocatorMemoryUsed

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "liveBytes" f AllocatorMemoryUsed AllocatorMemoryUsed a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "liveBytes" -> (a -> f b) -> AllocatorMemoryUsed -> f AllocatorMemoryUsed

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "peakBytes" f AllocatorMemoryUsed AllocatorMemoryUsed a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "peakBytes" -> (a -> f b) -> AllocatorMemoryUsed -> f AllocatorMemoryUsed

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "totalBytes" f AllocatorMemoryUsed AllocatorMemoryUsed a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "totalBytes" -> (a -> f b) -> AllocatorMemoryUsed -> f AllocatorMemoryUsed

                              data MemoryStats Source #

                              Instances

                              Eq MemoryStats Source # 
                              Ord MemoryStats Source # 
                              Show MemoryStats Source # 
                              Message MemoryStats Source # 

                              Methods

                              descriptor :: MessageDescriptor MemoryStats

                              Default MemoryStats Source # 

                              Methods

                              def :: MemoryStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "devicePersistentMemorySize" f MemoryStats MemoryStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "devicePersistentMemorySize" -> (a -> f b) -> MemoryStats -> f MemoryStats

                              ((~) * a [Int64], (~) * b [Int64], Functor f) => HasLens "devicePersistentTensorAllocIds" f MemoryStats MemoryStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "devicePersistentTensorAllocIds" -> (a -> f b) -> MemoryStats -> f MemoryStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "deviceTempMemorySize" f MemoryStats MemoryStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deviceTempMemorySize" -> (a -> f b) -> MemoryStats -> f MemoryStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "hostPersistentMemorySize" f MemoryStats MemoryStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hostPersistentMemorySize" -> (a -> f b) -> MemoryStats -> f MemoryStats

                              ((~) * a [Int64], (~) * b [Int64], Functor f) => HasLens "hostPersistentTensorAllocIds" f MemoryStats MemoryStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hostPersistentTensorAllocIds" -> (a -> f b) -> MemoryStats -> f MemoryStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "hostTempMemorySize" f MemoryStats MemoryStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hostTempMemorySize" -> (a -> f b) -> MemoryStats -> f MemoryStats

                              data NodeExecStats Source #

                              Instances

                              Eq NodeExecStats Source # 
                              Ord NodeExecStats Source # 
                              Show NodeExecStats Source # 
                              Message NodeExecStats Source # 

                              Methods

                              descriptor :: MessageDescriptor NodeExecStats

                              Default NodeExecStats Source # 

                              Methods

                              def :: NodeExecStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "allEndRelMicros" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allEndRelMicros" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "allStartMicros" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allStartMicros" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a (Maybe MemoryStats), (~) * b (Maybe MemoryStats), Functor f) => HasLens "maybe'memoryStats" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'memoryStats" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a [AllocatorMemoryUsed], (~) * b [AllocatorMemoryUsed], Functor f) => HasLens "memory" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "memory" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a MemoryStats, (~) * b MemoryStats, Functor f) => HasLens "memoryStats" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "memoryStats" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "nodeName" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "nodeName" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "opEndRelMicros" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "opEndRelMicros" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "opStartRelMicros" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "opStartRelMicros" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a [NodeOutput], (~) * b [NodeOutput], Functor f) => HasLens "output" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "output" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a [AllocationDescription], (~) * b [AllocationDescription], Functor f) => HasLens "referencedTensor" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "referencedTensor" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "scheduledMicros" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "scheduledMicros" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a Word32, (~) * b Word32, Functor f) => HasLens "threadId" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "threadId" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "timelineLabel" f NodeExecStats NodeExecStats a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "timelineLabel" -> (a -> f b) -> NodeExecStats -> f NodeExecStats

                              data NodeOutput Source #

                              Instances

                              Eq NodeOutput Source # 
                              Ord NodeOutput Source # 
                              Show NodeOutput Source # 
                              Message NodeOutput Source # 

                              Methods

                              descriptor :: MessageDescriptor NodeOutput

                              Default NodeOutput Source # 

                              Methods

                              def :: NodeOutput

                              ((~) * a (Maybe TensorDescription), (~) * b (Maybe TensorDescription), Functor f) => HasLens "maybe'tensorDescription" f NodeOutput NodeOutput a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensorDescription" -> (a -> f b) -> NodeOutput -> f NodeOutput

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "slot" f NodeOutput NodeOutput a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "slot" -> (a -> f b) -> NodeOutput -> f NodeOutput

                              ((~) * a TensorDescription, (~) * b TensorDescription, Functor f) => HasLens "tensorDescription" f NodeOutput NodeOutput a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensorDescription" -> (a -> f b) -> NodeOutput -> f NodeOutput

                              data StepStats Source #

                              Constructors

                              StepStats 

                              Instances

                              allEndRelMicros :: forall f s t a b. HasLens "allEndRelMicros" f s t a b => LensLike f s t a b Source #

                              allStartMicros :: forall f s t a b. HasLens "allStartMicros" f s t a b => LensLike f s t a b Source #

                              allocatorBytesInUse :: forall f s t a b. HasLens "allocatorBytesInUse" f s t a b => LensLike f s t a b Source #

                              allocatorName :: forall f s t a b. HasLens "allocatorName" f s t a b => LensLike f s t a b Source #

                              devStats :: forall f s t a b. HasLens "devStats" f s t a b => LensLike f s t a b Source #

                              device :: forall f s t a b. HasLens "device" f s t a b => LensLike f s t a b Source #

                              devicePersistentMemorySize :: forall f s t a b. HasLens "devicePersistentMemorySize" f s t a b => LensLike f s t a b Source #

                              devicePersistentTensorAllocIds :: forall f s t a b. HasLens "devicePersistentTensorAllocIds" f s t a b => LensLike f s t a b Source #

                              deviceTempMemorySize :: forall f s t a b. HasLens "deviceTempMemorySize" f s t a b => LensLike f s t a b Source #

                              hostPersistentMemorySize :: forall f s t a b. HasLens "hostPersistentMemorySize" f s t a b => LensLike f s t a b Source #

                              hostPersistentTensorAllocIds :: forall f s t a b. HasLens "hostPersistentTensorAllocIds" f s t a b => LensLike f s t a b Source #

                              hostTempMemorySize :: forall f s t a b. HasLens "hostTempMemorySize" f s t a b => LensLike f s t a b Source #

                              liveBytes :: forall f s t a b. HasLens "liveBytes" f s t a b => LensLike f s t a b Source #

                              maybe'memoryStats :: forall f s t a b. HasLens "maybe'memoryStats" f s t a b => LensLike f s t a b Source #

                              maybe'tensorDescription :: forall f s t a b. HasLens "maybe'tensorDescription" f s t a b => LensLike f s t a b Source #

                              memory :: forall f s t a b. HasLens "memory" f s t a b => LensLike f s t a b Source #

                              memoryStats :: forall f s t a b. HasLens "memoryStats" f s t a b => LensLike f s t a b Source #

                              nodeName :: forall f s t a b. HasLens "nodeName" f s t a b => LensLike f s t a b Source #

                              nodeStats :: forall f s t a b. HasLens "nodeStats" f s t a b => LensLike f s t a b Source #

                              opEndRelMicros :: forall f s t a b. HasLens "opEndRelMicros" f s t a b => LensLike f s t a b Source #

                              opStartRelMicros :: forall f s t a b. HasLens "opStartRelMicros" f s t a b => LensLike f s t a b Source #

                              output :: forall f s t a b. HasLens "output" f s t a b => LensLike f s t a b Source #

                              peakBytes :: forall f s t a b. HasLens "peakBytes" f s t a b => LensLike f s t a b Source #

                              referencedTensor :: forall f s t a b. HasLens "referencedTensor" f s t a b => LensLike f s t a b Source #

                              scheduledMicros :: forall f s t a b. HasLens "scheduledMicros" f s t a b => LensLike f s t a b Source #

                              slot :: forall f s t a b. HasLens "slot" f s t a b => LensLike f s t a b Source #

                              tensorDescription :: forall f s t a b. HasLens "tensorDescription" f s t a b => LensLike f s t a b Source #

                              threadId :: forall f s t a b. HasLens "threadId" f s t a b => LensLike f s t a b Source #

                              timelineLabel :: forall f s t a b. HasLens "timelineLabel" f s t a b => LensLike f s t a b Source #

                              totalBytes :: forall f s t a b. HasLens "totalBytes" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Summary.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Summary.html index f5d1c6d..13d3ff6 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Summary.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Summary.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.Summary

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Summary

                              Documentation

                              data HistogramProto

                              Instances

                              Eq HistogramProto 
                              Show HistogramProto 
                              Message HistogramProto 
                              Default HistogramProto 
                              HasField "bucket" HistogramProto HistogramProto 
                              HasField "bucketLimit" HistogramProto HistogramProto 
                              HasField "max" HistogramProto HistogramProto 
                              HasField "min" HistogramProto HistogramProto 
                              HasField "num" HistogramProto HistogramProto 
                              HasField "sum" HistogramProto HistogramProto 
                              HasField "sumSquares" HistogramProto HistogramProto 
                              type Field "bucket" HistogramProto = [Double] 
                              type Field "bucketLimit" HistogramProto = [Double] 
                              type Field "max" HistogramProto = Double 
                              type Field "min" HistogramProto = Double 
                              type Field "num" HistogramProto = Double 
                              type Field "sum" HistogramProto = Double 
                              type Field "sumSquares" HistogramProto = Double 

                              data Summary

                              Constructors

                              Summary 

                              Instances

                              Eq Summary 
                              Show Summary 
                              Message Summary 
                              Default Summary 
                              HasField "value" Summary Summary 
                              type Field "value" Summary = [Summary'Value] 

                              data Summary'Audio

                              Instances

                              Eq Summary'Audio 
                              Show Summary'Audio 
                              Message Summary'Audio 
                              Default Summary'Audio 
                              HasField "contentType" Summary'Audio Summary'Audio 
                              HasField "encodedAudioString" Summary'Audio Summary'Audio 
                              HasField "lengthFrames" Summary'Audio Summary'Audio 
                              HasField "numChannels" Summary'Audio Summary'Audio 
                              HasField "sampleRate" Summary'Audio Summary'Audio 
                              type Field "contentType" Summary'Audio = Text 
                              type Field "encodedAudioString" Summary'Audio = ByteString 
                              type Field "lengthFrames" Summary'Audio = Int64 
                              type Field "numChannels" Summary'Audio = Int64 
                              type Field "sampleRate" Summary'Audio = Float 

                              data Summary'Image

                              Instances

                              Eq Summary'Image 
                              Show Summary'Image 
                              Message Summary'Image 
                              Default Summary'Image 
                              HasField "colorspace" Summary'Image Summary'Image 
                              HasField "encodedImageString" Summary'Image Summary'Image 
                              HasField "height" Summary'Image Summary'Image 
                              HasField "width" Summary'Image Summary'Image 
                              type Field "colorspace" Summary'Image = Int32 
                              type Field "encodedImageString" Summary'Image = ByteString 
                              type Field "height" Summary'Image = Int32 
                              type Field "width" Summary'Image = Int32 

                              data Summary'Value

                              Instances

                              Eq Summary'Value 
                              Show Summary'Value 
                              Message Summary'Value 
                              Default Summary'Value 
                              HasField "audio" Summary'Value Summary'Value 
                              HasField "histo" Summary'Value Summary'Value 
                              HasField "image" Summary'Value Summary'Value 
                              HasField "maybe'audio" Summary'Value Summary'Value 
                              HasField "maybe'histo" Summary'Value Summary'Value 
                              HasField "maybe'image" Summary'Value Summary'Value 
                              HasField "maybe'obsoleteOldStyleHistogram" Summary'Value Summary'Value 
                              HasField "maybe'simpleValue" Summary'Value Summary'Value 
                              HasField "maybe'tensor" Summary'Value Summary'Value 
                              HasField "nodeName" Summary'Value Summary'Value 
                              HasField "obsoleteOldStyleHistogram" Summary'Value Summary'Value 
                              HasField "simpleValue" Summary'Value Summary'Value 
                              HasField "tag" Summary'Value Summary'Value 
                              HasField "tensor" Summary'Value Summary'Value 
                              type Field "audio" Summary'Value = Summary'Audio 
                              type Field "histo" Summary'Value = HistogramProto 
                              type Field "image" Summary'Value = Summary'Image 
                              type Field "maybe'audio" Summary'Value = Maybe Summary'Audio 
                              type Field "maybe'histo" Summary'Value = Maybe HistogramProto 
                              type Field "maybe'image" Summary'Value = Maybe Summary'Image 
                              type Field "maybe'obsoleteOldStyleHistogram" Summary'Value = Maybe ByteString 
                              type Field "maybe'simpleValue" Summary'Value = Maybe Float 
                              type Field "maybe'tensor" Summary'Value = Maybe TensorProto 
                              type Field "nodeName" Summary'Value = Text 
                              type Field "obsoleteOldStyleHistogram" Summary'Value = ByteString 
                              type Field "simpleValue" Summary'Value = Float 
                              type Field "tag" Summary'Value = Text 
                              type Field "tensor" Summary'Value = TensorProto 

                              audio :: forall msg msg'. HasField "audio" msg msg' => Lens msg msg' (Field "audio" msg) (Field "audio" msg')

                              bucket :: forall msg msg'. HasField "bucket" msg msg' => Lens msg msg' (Field "bucket" msg) (Field "bucket" msg')

                              bucketLimit :: forall msg msg'. HasField "bucketLimit" msg msg' => Lens msg msg' (Field "bucketLimit" msg) (Field "bucketLimit" msg')

                              colorspace :: forall msg msg'. HasField "colorspace" msg msg' => Lens msg msg' (Field "colorspace" msg) (Field "colorspace" msg')

                              contentType :: forall msg msg'. HasField "contentType" msg msg' => Lens msg msg' (Field "contentType" msg) (Field "contentType" msg')

                              encodedAudioString :: forall msg msg'. HasField "encodedAudioString" msg msg' => Lens msg msg' (Field "encodedAudioString" msg) (Field "encodedAudioString" msg')

                              encodedImageString :: forall msg msg'. HasField "encodedImageString" msg msg' => Lens msg msg' (Field "encodedImageString" msg) (Field "encodedImageString" msg')

                              height :: forall msg msg'. HasField "height" msg msg' => Lens msg msg' (Field "height" msg) (Field "height" msg')

                              histo :: forall msg msg'. HasField "histo" msg msg' => Lens msg msg' (Field "histo" msg) (Field "histo" msg')

                              image :: forall msg msg'. HasField "image" msg msg' => Lens msg msg' (Field "image" msg) (Field "image" msg')

                              lengthFrames :: forall msg msg'. HasField "lengthFrames" msg msg' => Lens msg msg' (Field "lengthFrames" msg) (Field "lengthFrames" msg')

                              max :: forall msg msg'. HasField "max" msg msg' => Lens msg msg' (Field "max" msg) (Field "max" msg')

                              maybe'audio :: forall msg msg'. HasField "maybe'audio" msg msg' => Lens msg msg' (Field "maybe'audio" msg) (Field "maybe'audio" msg')

                              maybe'histo :: forall msg msg'. HasField "maybe'histo" msg msg' => Lens msg msg' (Field "maybe'histo" msg) (Field "maybe'histo" msg')

                              maybe'image :: forall msg msg'. HasField "maybe'image" msg msg' => Lens msg msg' (Field "maybe'image" msg) (Field "maybe'image" msg')

                              maybe'obsoleteOldStyleHistogram :: forall msg msg'. HasField "maybe'obsoleteOldStyleHistogram" msg msg' => Lens msg msg' (Field "maybe'obsoleteOldStyleHistogram" msg) (Field "maybe'obsoleteOldStyleHistogram" msg')

                              maybe'simpleValue :: forall msg msg'. HasField "maybe'simpleValue" msg msg' => Lens msg msg' (Field "maybe'simpleValue" msg) (Field "maybe'simpleValue" msg')

                              maybe'tensor :: forall msg msg'. HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg')

                              min :: forall msg msg'. HasField "min" msg msg' => Lens msg msg' (Field "min" msg) (Field "min" msg')

                              nodeName :: forall msg msg'. HasField "nodeName" msg msg' => Lens msg msg' (Field "nodeName" msg) (Field "nodeName" msg')

                              num :: forall msg msg'. HasField "num" msg msg' => Lens msg msg' (Field "num" msg) (Field "num" msg')

                              numChannels :: forall msg msg'. HasField "numChannels" msg msg' => Lens msg msg' (Field "numChannels" msg) (Field "numChannels" msg')

                              obsoleteOldStyleHistogram :: forall msg msg'. HasField "obsoleteOldStyleHistogram" msg msg' => Lens msg msg' (Field "obsoleteOldStyleHistogram" msg) (Field "obsoleteOldStyleHistogram" msg')

                              sampleRate :: forall msg msg'. HasField "sampleRate" msg msg' => Lens msg msg' (Field "sampleRate" msg) (Field "sampleRate" msg')

                              simpleValue :: forall msg msg'. HasField "simpleValue" msg msg' => Lens msg msg' (Field "simpleValue" msg) (Field "simpleValue" msg')

                              sum :: forall msg msg'. HasField "sum" msg msg' => Lens msg msg' (Field "sum" msg) (Field "sum" msg')

                              sumSquares :: forall msg msg'. HasField "sumSquares" msg msg' => Lens msg msg' (Field "sumSquares" msg) (Field "sumSquares" msg')

                              tag :: forall msg msg'. HasField "tag" msg msg' => Lens msg msg' (Field "tag" msg) (Field "tag" msg')

                              tensor :: forall msg msg'. HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg')

                              typeHint :: forall msg msg'. HasField "typeHint" msg msg' => Lens msg msg' (Field "typeHint" msg) (Field "typeHint" msg')

                              value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')

                              width :: forall msg msg'. HasField "width" msg msg' => Lens msg msg' (Field "width" msg) (Field "width" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Summary

                              Documentation

                              data HistogramProto Source #

                              Instances

                              Eq HistogramProto Source # 
                              Ord HistogramProto Source # 
                              Show HistogramProto Source # 
                              Message HistogramProto Source # 

                              Methods

                              descriptor :: MessageDescriptor HistogramProto

                              Default HistogramProto Source # 
                              ((~) * a [Double], (~) * b [Double], Functor f) => HasLens "bucket" f HistogramProto HistogramProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "bucket" -> (a -> f b) -> HistogramProto -> f HistogramProto

                              ((~) * a [Double], (~) * b [Double], Functor f) => HasLens "bucketLimit" f HistogramProto HistogramProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "bucketLimit" -> (a -> f b) -> HistogramProto -> f HistogramProto

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "max" f HistogramProto HistogramProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "max" -> (a -> f b) -> HistogramProto -> f HistogramProto

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "min" f HistogramProto HistogramProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "min" -> (a -> f b) -> HistogramProto -> f HistogramProto

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "num" f HistogramProto HistogramProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "num" -> (a -> f b) -> HistogramProto -> f HistogramProto

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "sum" f HistogramProto HistogramProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "sum" -> (a -> f b) -> HistogramProto -> f HistogramProto

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "sumSquares" f HistogramProto HistogramProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "sumSquares" -> (a -> f b) -> HistogramProto -> f HistogramProto

                              data Summary Source #

                              Constructors

                              Summary 

                              Instances

                              Eq Summary Source # 

                              Methods

                              (==) :: Summary -> Summary -> Bool #

                              (/=) :: Summary -> Summary -> Bool #

                              Ord Summary Source # 
                              Show Summary Source # 
                              Message Summary Source # 

                              Methods

                              descriptor :: MessageDescriptor Summary

                              Default Summary Source # 

                              Methods

                              def :: Summary

                              ((~) * a [Summary'Value], (~) * b [Summary'Value], Functor f) => HasLens "value" f Summary Summary a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> Summary -> f Summary

                              data Summary'Audio Source #

                              Instances

                              Eq Summary'Audio Source # 
                              Ord Summary'Audio Source # 
                              Show Summary'Audio Source # 
                              Message Summary'Audio Source # 

                              Methods

                              descriptor :: MessageDescriptor Summary'Audio

                              Default Summary'Audio Source # 

                              Methods

                              def :: Summary'Audio

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "contentType" f Summary'Audio Summary'Audio a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "contentType" -> (a -> f b) -> Summary'Audio -> f Summary'Audio

                              ((~) * a ByteString, (~) * b ByteString, Functor f) => HasLens "encodedAudioString" f Summary'Audio Summary'Audio a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "encodedAudioString" -> (a -> f b) -> Summary'Audio -> f Summary'Audio

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "lengthFrames" f Summary'Audio Summary'Audio a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "lengthFrames" -> (a -> f b) -> Summary'Audio -> f Summary'Audio

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "numChannels" f Summary'Audio Summary'Audio a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "numChannels" -> (a -> f b) -> Summary'Audio -> f Summary'Audio

                              ((~) * a Float, (~) * b Float, Functor f) => HasLens "sampleRate" f Summary'Audio Summary'Audio a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "sampleRate" -> (a -> f b) -> Summary'Audio -> f Summary'Audio

                              data Summary'Image Source #

                              Instances

                              Eq Summary'Image Source # 
                              Ord Summary'Image Source # 
                              Show Summary'Image Source # 
                              Message Summary'Image Source # 

                              Methods

                              descriptor :: MessageDescriptor Summary'Image

                              Default Summary'Image Source # 

                              Methods

                              def :: Summary'Image

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "colorspace" f Summary'Image Summary'Image a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "colorspace" -> (a -> f b) -> Summary'Image -> f Summary'Image

                              ((~) * a ByteString, (~) * b ByteString, Functor f) => HasLens "encodedImageString" f Summary'Image Summary'Image a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "encodedImageString" -> (a -> f b) -> Summary'Image -> f Summary'Image

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "height" f Summary'Image Summary'Image a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "height" -> (a -> f b) -> Summary'Image -> f Summary'Image

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "width" f Summary'Image Summary'Image a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "width" -> (a -> f b) -> Summary'Image -> f Summary'Image

                              data Summary'Value Source #

                              Instances

                              Eq Summary'Value Source # 
                              Ord Summary'Value Source # 
                              Show Summary'Value Source # 
                              Message Summary'Value Source # 

                              Methods

                              descriptor :: MessageDescriptor Summary'Value

                              Default Summary'Value Source # 

                              Methods

                              def :: Summary'Value

                              ((~) * a Summary'Audio, (~) * b Summary'Audio, Functor f) => HasLens "audio" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "audio" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a HistogramProto, (~) * b HistogramProto, Functor f) => HasLens "histo" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "histo" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a Summary'Image, (~) * b Summary'Image, Functor f) => HasLens "image" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "image" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a (Maybe Summary'Audio), (~) * b (Maybe Summary'Audio), Functor f) => HasLens "maybe'audio" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'audio" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a (Maybe HistogramProto), (~) * b (Maybe HistogramProto), Functor f) => HasLens "maybe'histo" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'histo" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a (Maybe Summary'Image), (~) * b (Maybe Summary'Image), Functor f) => HasLens "maybe'image" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'image" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a (Maybe SummaryMetadata), (~) * b (Maybe SummaryMetadata), Functor f) => HasLens "maybe'metadata" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'metadata" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a (Maybe ByteString), (~) * b (Maybe ByteString), Functor f) => HasLens "maybe'obsoleteOldStyleHistogram" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'obsoleteOldStyleHistogram" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a (Maybe Float), (~) * b (Maybe Float), Functor f) => HasLens "maybe'simpleValue" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'simpleValue" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a (Maybe TensorProto), (~) * b (Maybe TensorProto), Functor f) => HasLens "maybe'tensor" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensor" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a (Maybe Summary'Value'Value), (~) * b (Maybe Summary'Value'Value), Functor f) => HasLens "maybe'value" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a SummaryMetadata, (~) * b SummaryMetadata, Functor f) => HasLens "metadata" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "metadata" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "nodeName" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "nodeName" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a ByteString, (~) * b ByteString, Functor f) => HasLens "obsoleteOldStyleHistogram" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "obsoleteOldStyleHistogram" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a Float, (~) * b Float, Functor f) => HasLens "simpleValue" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "simpleValue" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "tag" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tag" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              ((~) * a TensorProto, (~) * b TensorProto, Functor f) => HasLens "tensor" f Summary'Value Summary'Value a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensor" -> (a -> f b) -> Summary'Value -> f Summary'Value

                              data SummaryMetadata Source #

                              Instances

                              Eq SummaryMetadata Source # 
                              Ord SummaryMetadata Source # 
                              Show SummaryMetadata Source # 
                              Message SummaryMetadata Source # 

                              Methods

                              descriptor :: MessageDescriptor SummaryMetadata

                              Default SummaryMetadata Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "displayName" f SummaryMetadata SummaryMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "displayName" -> (a -> f b) -> SummaryMetadata -> f SummaryMetadata

                              ((~) * a (Maybe SummaryMetadata'PluginData), (~) * b (Maybe SummaryMetadata'PluginData), Functor f) => HasLens "maybe'pluginData" f SummaryMetadata SummaryMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'pluginData" -> (a -> f b) -> SummaryMetadata -> f SummaryMetadata

                              ((~) * a SummaryMetadata'PluginData, (~) * b SummaryMetadata'PluginData, Functor f) => HasLens "pluginData" f SummaryMetadata SummaryMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "pluginData" -> (a -> f b) -> SummaryMetadata -> f SummaryMetadata

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "summaryDescription" f SummaryMetadata SummaryMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "summaryDescription" -> (a -> f b) -> SummaryMetadata -> f SummaryMetadata

                              data SummaryMetadata'PluginData Source #

                              Instances

                              Eq SummaryMetadata'PluginData Source # 
                              Ord SummaryMetadata'PluginData Source # 
                              Show SummaryMetadata'PluginData Source # 
                              Message SummaryMetadata'PluginData Source # 

                              Methods

                              descriptor :: MessageDescriptor SummaryMetadata'PluginData

                              Default SummaryMetadata'PluginData Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "content" f SummaryMetadata'PluginData SummaryMetadata'PluginData a b Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "pluginName" f SummaryMetadata'PluginData SummaryMetadata'PluginData a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "pluginName" -> (a -> f b) -> SummaryMetadata'PluginData -> f SummaryMetadata'PluginData

                              audio :: forall f s t a b. HasLens "audio" f s t a b => LensLike f s t a b Source #

                              bucket :: forall f s t a b. HasLens "bucket" f s t a b => LensLike f s t a b Source #

                              bucketLimit :: forall f s t a b. HasLens "bucketLimit" f s t a b => LensLike f s t a b Source #

                              colorspace :: forall f s t a b. HasLens "colorspace" f s t a b => LensLike f s t a b Source #

                              content :: forall f s t a b. HasLens "content" f s t a b => LensLike f s t a b Source #

                              contentType :: forall f s t a b. HasLens "contentType" f s t a b => LensLike f s t a b Source #

                              displayName :: forall f s t a b. HasLens "displayName" f s t a b => LensLike f s t a b Source #

                              encodedAudioString :: forall f s t a b. HasLens "encodedAudioString" f s t a b => LensLike f s t a b Source #

                              encodedImageString :: forall f s t a b. HasLens "encodedImageString" f s t a b => LensLike f s t a b Source #

                              height :: forall f s t a b. HasLens "height" f s t a b => LensLike f s t a b Source #

                              histo :: forall f s t a b. HasLens "histo" f s t a b => LensLike f s t a b Source #

                              image :: forall f s t a b. HasLens "image" f s t a b => LensLike f s t a b Source #

                              lengthFrames :: forall f s t a b. HasLens "lengthFrames" f s t a b => LensLike f s t a b Source #

                              max :: forall f s t a b. HasLens "max" f s t a b => LensLike f s t a b Source #

                              maybe'audio :: forall f s t a b. HasLens "maybe'audio" f s t a b => LensLike f s t a b Source #

                              maybe'histo :: forall f s t a b. HasLens "maybe'histo" f s t a b => LensLike f s t a b Source #

                              maybe'image :: forall f s t a b. HasLens "maybe'image" f s t a b => LensLike f s t a b Source #

                              maybe'metadata :: forall f s t a b. HasLens "maybe'metadata" f s t a b => LensLike f s t a b Source #

                              maybe'obsoleteOldStyleHistogram :: forall f s t a b. HasLens "maybe'obsoleteOldStyleHistogram" f s t a b => LensLike f s t a b Source #

                              maybe'pluginData :: forall f s t a b. HasLens "maybe'pluginData" f s t a b => LensLike f s t a b Source #

                              maybe'simpleValue :: forall f s t a b. HasLens "maybe'simpleValue" f s t a b => LensLike f s t a b Source #

                              maybe'tensor :: forall f s t a b. HasLens "maybe'tensor" f s t a b => LensLike f s t a b Source #

                              maybe'value :: forall f s t a b. HasLens "maybe'value" f s t a b => LensLike f s t a b Source #

                              metadata :: forall f s t a b. HasLens "metadata" f s t a b => LensLike f s t a b Source #

                              min :: forall f s t a b. HasLens "min" f s t a b => LensLike f s t a b Source #

                              nodeName :: forall f s t a b. HasLens "nodeName" f s t a b => LensLike f s t a b Source #

                              num :: forall f s t a b. HasLens "num" f s t a b => LensLike f s t a b Source #

                              numChannels :: forall f s t a b. HasLens "numChannels" f s t a b => LensLike f s t a b Source #

                              obsoleteOldStyleHistogram :: forall f s t a b. HasLens "obsoleteOldStyleHistogram" f s t a b => LensLike f s t a b Source #

                              pluginData :: forall f s t a b. HasLens "pluginData" f s t a b => LensLike f s t a b Source #

                              pluginName :: forall f s t a b. HasLens "pluginName" f s t a b => LensLike f s t a b Source #

                              sampleRate :: forall f s t a b. HasLens "sampleRate" f s t a b => LensLike f s t a b Source #

                              simpleValue :: forall f s t a b. HasLens "simpleValue" f s t a b => LensLike f s t a b Source #

                              sum :: forall f s t a b. HasLens "sum" f s t a b => LensLike f s t a b Source #

                              sumSquares :: forall f s t a b. HasLens "sumSquares" f s t a b => LensLike f s t a b Source #

                              summaryDescription :: forall f s t a b. HasLens "summaryDescription" f s t a b => LensLike f s t a b Source #

                              tag :: forall f s t a b. HasLens "tag" f s t a b => LensLike f s t a b Source #

                              tensor :: forall f s t a b. HasLens "tensor" f s t a b => LensLike f s t a b Source #

                              typeHint :: forall f s t a b. HasLens "typeHint" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              width :: forall f s t a b. HasLens "width" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html index d6e9f34..6d154eb 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.Tensor

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Tensor

                              Documentation

                              data TensorProto

                              Instances

                              Eq TensorProto 
                              Show TensorProto 
                              Message TensorProto 
                              Default TensorProto 
                              HasField "boolVal" TensorProto TensorProto 
                              HasField "dcomplexVal" TensorProto TensorProto 
                              HasField "doubleVal" TensorProto TensorProto 
                              HasField "dtype" TensorProto TensorProto 
                              HasField "floatVal" TensorProto TensorProto 
                              HasField "halfVal" TensorProto TensorProto 
                              HasField "int64Val" TensorProto TensorProto 
                              HasField "intVal" TensorProto TensorProto 
                              HasField "maybe'tensorShape" TensorProto TensorProto 
                              HasField "resourceHandleVal" TensorProto TensorProto 
                              HasField "scomplexVal" TensorProto TensorProto 
                              HasField "stringVal" TensorProto TensorProto 
                              HasField "tensorContent" TensorProto TensorProto 
                              HasField "tensorShape" TensorProto TensorProto 
                              HasField "versionNumber" TensorProto TensorProto 
                              type Field "boolVal" TensorProto = [Bool] 
                              type Field "dcomplexVal" TensorProto = [Double] 
                              type Field "doubleVal" TensorProto = [Double] 
                              type Field "dtype" TensorProto = DataType 
                              type Field "floatVal" TensorProto = [Float] 
                              type Field "halfVal" TensorProto = [Int32] 
                              type Field "int64Val" TensorProto = [Int64] 
                              type Field "intVal" TensorProto = [Int32] 
                              type Field "maybe'tensorShape" TensorProto = Maybe TensorShapeProto 
                              type Field "resourceHandleVal" TensorProto = [ResourceHandle] 
                              type Field "scomplexVal" TensorProto = [Float] 
                              type Field "stringVal" TensorProto = [ByteString] 
                              type Field "tensorContent" TensorProto = ByteString 
                              type Field "tensorShape" TensorProto = TensorShapeProto 
                              type Field "versionNumber" TensorProto = Int32 

                              boolVal :: forall msg msg'. HasField "boolVal" msg msg' => Lens msg msg' (Field "boolVal" msg) (Field "boolVal" msg')

                              dcomplexVal :: forall msg msg'. HasField "dcomplexVal" msg msg' => Lens msg msg' (Field "dcomplexVal" msg) (Field "dcomplexVal" msg')

                              doubleVal :: forall msg msg'. HasField "doubleVal" msg msg' => Lens msg msg' (Field "doubleVal" msg) (Field "doubleVal" msg')

                              dtype :: forall msg msg'. HasField "dtype" msg msg' => Lens msg msg' (Field "dtype" msg) (Field "dtype" msg')

                              floatVal :: forall msg msg'. HasField "floatVal" msg msg' => Lens msg msg' (Field "floatVal" msg) (Field "floatVal" msg')

                              halfVal :: forall msg msg'. HasField "halfVal" msg msg' => Lens msg msg' (Field "halfVal" msg) (Field "halfVal" msg')

                              int64Val :: forall msg msg'. HasField "int64Val" msg msg' => Lens msg msg' (Field "int64Val" msg) (Field "int64Val" msg')

                              intVal :: forall msg msg'. HasField "intVal" msg msg' => Lens msg msg' (Field "intVal" msg) (Field "intVal" msg')

                              maybe'tensorShape :: forall msg msg'. HasField "maybe'tensorShape" msg msg' => Lens msg msg' (Field "maybe'tensorShape" msg) (Field "maybe'tensorShape" msg')

                              resourceHandleVal :: forall msg msg'. HasField "resourceHandleVal" msg msg' => Lens msg msg' (Field "resourceHandleVal" msg) (Field "resourceHandleVal" msg')

                              scomplexVal :: forall msg msg'. HasField "scomplexVal" msg msg' => Lens msg msg' (Field "scomplexVal" msg) (Field "scomplexVal" msg')

                              stringVal :: forall msg msg'. HasField "stringVal" msg msg' => Lens msg msg' (Field "stringVal" msg) (Field "stringVal" msg')

                              tensorContent :: forall msg msg'. HasField "tensorContent" msg msg' => Lens msg msg' (Field "tensorContent" msg) (Field "tensorContent" msg')

                              tensorShape :: forall msg msg'. HasField "tensorShape" msg msg' => Lens msg msg' (Field "tensorShape" msg) (Field "tensorShape" msg')

                              versionNumber :: forall msg msg'. HasField "versionNumber" msg msg' => Lens msg msg' (Field "versionNumber" msg) (Field "versionNumber" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Tensor

                              Documentation

                              data TensorProto Source #

                              Instances

                              Eq TensorProto Source # 
                              Ord TensorProto Source # 
                              Show TensorProto Source # 
                              Message TensorProto Source # 

                              Methods

                              descriptor :: MessageDescriptor TensorProto

                              Default TensorProto Source # 

                              Methods

                              def :: TensorProto

                              ((~) * a [Bool], (~) * b [Bool], Functor f) => HasLens "boolVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "boolVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [Double], (~) * b [Double], Functor f) => HasLens "dcomplexVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "dcomplexVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [Double], (~) * b [Double], Functor f) => HasLens "doubleVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "doubleVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "dtype" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "dtype" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [Float], (~) * b [Float], Functor f) => HasLens "floatVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "floatVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [Int32], (~) * b [Int32], Functor f) => HasLens "halfVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "halfVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [Int64], (~) * b [Int64], Functor f) => HasLens "int64Val" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "int64Val" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [Int32], (~) * b [Int32], Functor f) => HasLens "intVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "intVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a (Maybe TensorShapeProto), (~) * b (Maybe TensorShapeProto), Functor f) => HasLens "maybe'tensorShape" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensorShape" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [ResourceHandleProto], (~) * b [ResourceHandleProto], Functor f) => HasLens "resourceHandleVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "resourceHandleVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [Float], (~) * b [Float], Functor f) => HasLens "scomplexVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "scomplexVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a [ByteString], (~) * b [ByteString], Functor f) => HasLens "stringVal" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "stringVal" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a ByteString, (~) * b ByteString, Functor f) => HasLens "tensorContent" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensorContent" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a TensorShapeProto, (~) * b TensorShapeProto, Functor f) => HasLens "tensorShape" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensorShape" -> (a -> f b) -> TensorProto -> f TensorProto

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "versionNumber" f TensorProto TensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "versionNumber" -> (a -> f b) -> TensorProto -> f TensorProto

                              boolVal :: forall f s t a b. HasLens "boolVal" f s t a b => LensLike f s t a b Source #

                              dcomplexVal :: forall f s t a b. HasLens "dcomplexVal" f s t a b => LensLike f s t a b Source #

                              doubleVal :: forall f s t a b. HasLens "doubleVal" f s t a b => LensLike f s t a b Source #

                              dtype :: forall f s t a b. HasLens "dtype" f s t a b => LensLike f s t a b Source #

                              floatVal :: forall f s t a b. HasLens "floatVal" f s t a b => LensLike f s t a b Source #

                              halfVal :: forall f s t a b. HasLens "halfVal" f s t a b => LensLike f s t a b Source #

                              int64Val :: forall f s t a b. HasLens "int64Val" f s t a b => LensLike f s t a b Source #

                              intVal :: forall f s t a b. HasLens "intVal" f s t a b => LensLike f s t a b Source #

                              maybe'tensorShape :: forall f s t a b. HasLens "maybe'tensorShape" f s t a b => LensLike f s t a b Source #

                              resourceHandleVal :: forall f s t a b. HasLens "resourceHandleVal" f s t a b => LensLike f s t a b Source #

                              scomplexVal :: forall f s t a b. HasLens "scomplexVal" f s t a b => LensLike f s t a b Source #

                              stringVal :: forall f s t a b. HasLens "stringVal" f s t a b => LensLike f s t a b Source #

                              tensorContent :: forall f s t a b. HasLens "tensorContent" f s t a b => LensLike f s t a b Source #

                              tensorShape :: forall f s t a b. HasLens "tensorShape" f s t a b => LensLike f s t a b Source #

                              versionNumber :: forall f s t a b. HasLens "versionNumber" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorDescription.html new file mode 100644 index 0000000..9a42b2d --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorDescription.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.TensorDescription

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.TensorDescription

                              Documentation

                              data TensorDescription Source #

                              Instances

                              Eq TensorDescription Source # 
                              Ord TensorDescription Source # 
                              Show TensorDescription Source # 
                              Message TensorDescription Source # 

                              Methods

                              descriptor :: MessageDescriptor TensorDescription

                              Default TensorDescription Source # 
                              ((~) * a AllocationDescription, (~) * b AllocationDescription, Functor f) => HasLens "allocationDescription" f TensorDescription TensorDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocationDescription" -> (a -> f b) -> TensorDescription -> f TensorDescription

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "dtype" f TensorDescription TensorDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "dtype" -> (a -> f b) -> TensorDescription -> f TensorDescription

                              ((~) * a (Maybe AllocationDescription), (~) * b (Maybe AllocationDescription), Functor f) => HasLens "maybe'allocationDescription" f TensorDescription TensorDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'allocationDescription" -> (a -> f b) -> TensorDescription -> f TensorDescription

                              ((~) * a (Maybe TensorShapeProto), (~) * b (Maybe TensorShapeProto), Functor f) => HasLens "maybe'shape" f TensorDescription TensorDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'shape" -> (a -> f b) -> TensorDescription -> f TensorDescription

                              ((~) * a TensorShapeProto, (~) * b TensorShapeProto, Functor f) => HasLens "shape" f TensorDescription TensorDescription a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "shape" -> (a -> f b) -> TensorDescription -> f TensorDescription

                              allocationDescription :: forall f s t a b. HasLens "allocationDescription" f s t a b => LensLike f s t a b Source #

                              dtype :: forall f s t a b. HasLens "dtype" f s t a b => LensLike f s t a b Source #

                              maybe'allocationDescription :: forall f s t a b. HasLens "maybe'allocationDescription" f s t a b => LensLike f s t a b Source #

                              maybe'shape :: forall f s t a b. HasLens "maybe'shape" f s t a b => LensLike f s t a b Source #

                              shape :: forall f s t a b. HasLens "shape" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html index f48b230..e5d3ee5 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.TensorShape

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.TensorShape

                              Documentation

                              dim :: forall msg msg'. HasField "dim" msg msg' => Lens msg msg' (Field "dim" msg) (Field "dim" msg')

                              name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

                              size :: forall msg msg'. HasField "size" msg msg' => Lens msg msg' (Field "size" msg) (Field "size" msg')

                              unknownRank :: forall msg msg'. HasField "unknownRank" msg msg' => Lens msg msg' (Field "unknownRank" msg) (Field "unknownRank" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.TensorShape

                              Documentation

                              data TensorShapeProto Source #

                              data TensorShapeProto'Dim Source #

                              dim :: forall f s t a b. HasLens "dim" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              size :: forall f s t a b. HasLens "size" f s t a b => LensLike f s t a b Source #

                              unknownRank :: forall f s t a b. HasLens "unknownRank" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorSlice.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorSlice.html new file mode 100644 index 0000000..cc49fb9 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorSlice.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.TensorSlice

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.TensorSlice

                              Documentation

                              data TensorSliceProto'Extent Source #

                              Instances

                              Eq TensorSliceProto'Extent Source # 
                              Ord TensorSliceProto'Extent Source # 
                              Show TensorSliceProto'Extent Source # 
                              Message TensorSliceProto'Extent Source # 

                              Methods

                              descriptor :: MessageDescriptor TensorSliceProto'Extent

                              Default TensorSliceProto'Extent Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "length" f TensorSliceProto'Extent TensorSliceProto'Extent a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "length" -> (a -> f b) -> TensorSliceProto'Extent -> f TensorSliceProto'Extent

                              ((~) * a (Maybe TensorSliceProto'Extent'HasLength), (~) * b (Maybe TensorSliceProto'Extent'HasLength), Functor f) => HasLens "maybe'hasLength" f TensorSliceProto'Extent TensorSliceProto'Extent a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'hasLength" -> (a -> f b) -> TensorSliceProto'Extent -> f TensorSliceProto'Extent

                              ((~) * a (Maybe Int64), (~) * b (Maybe Int64), Functor f) => HasLens "maybe'length" f TensorSliceProto'Extent TensorSliceProto'Extent a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'length" -> (a -> f b) -> TensorSliceProto'Extent -> f TensorSliceProto'Extent

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "start" f TensorSliceProto'Extent TensorSliceProto'Extent a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "start" -> (a -> f b) -> TensorSliceProto'Extent -> f TensorSliceProto'Extent

                              extent :: forall f s t a b. HasLens "extent" f s t a b => LensLike f s t a b Source #

                              length :: forall f s t a b. HasLens "length" f s t a b => LensLike f s t a b Source #

                              maybe'hasLength :: forall f s t a b. HasLens "maybe'hasLength" f s t a b => LensLike f s t a b Source #

                              maybe'length :: forall f s t a b. HasLens "maybe'length" f s t a b => LensLike f s t a b Source #

                              start :: forall f s t a b. HasLens "start" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html index c0c81dd..3f32de6 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.Types

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Types

                              Documentation

                              data DataType Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Variable.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Variable.html new file mode 100644 index 0000000..c11dffa --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Variable.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Variable

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Variable

                              Documentation

                              data SaveSliceInfoDef Source #

                              Instances

                              Eq SaveSliceInfoDef Source # 
                              Ord SaveSliceInfoDef Source # 
                              Show SaveSliceInfoDef Source # 
                              Message SaveSliceInfoDef Source # 

                              Methods

                              descriptor :: MessageDescriptor SaveSliceInfoDef

                              Default SaveSliceInfoDef Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "fullName" f SaveSliceInfoDef SaveSliceInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "fullName" -> (a -> f b) -> SaveSliceInfoDef -> f SaveSliceInfoDef

                              ((~) * a [Int64], (~) * b [Int64], Functor f) => HasLens "fullShape" f SaveSliceInfoDef SaveSliceInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "fullShape" -> (a -> f b) -> SaveSliceInfoDef -> f SaveSliceInfoDef

                              ((~) * a [Int64], (~) * b [Int64], Functor f) => HasLens "varOffset" f SaveSliceInfoDef SaveSliceInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "varOffset" -> (a -> f b) -> SaveSliceInfoDef -> f SaveSliceInfoDef

                              ((~) * a [Int64], (~) * b [Int64], Functor f) => HasLens "varShape" f SaveSliceInfoDef SaveSliceInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "varShape" -> (a -> f b) -> SaveSliceInfoDef -> f SaveSliceInfoDef

                              data VariableDef Source #

                              Instances

                              Eq VariableDef Source # 
                              Ord VariableDef Source # 
                              Show VariableDef Source # 
                              Message VariableDef Source # 

                              Methods

                              descriptor :: MessageDescriptor VariableDef

                              Default VariableDef Source # 

                              Methods

                              def :: VariableDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "initializerName" f VariableDef VariableDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "initializerName" -> (a -> f b) -> VariableDef -> f VariableDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "isResource" f VariableDef VariableDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "isResource" -> (a -> f b) -> VariableDef -> f VariableDef

                              ((~) * a (Maybe SaveSliceInfoDef), (~) * b (Maybe SaveSliceInfoDef), Functor f) => HasLens "maybe'saveSliceInfoDef" f VariableDef VariableDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'saveSliceInfoDef" -> (a -> f b) -> VariableDef -> f VariableDef

                              ((~) * a SaveSliceInfoDef, (~) * b SaveSliceInfoDef, Functor f) => HasLens "saveSliceInfoDef" f VariableDef VariableDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "saveSliceInfoDef" -> (a -> f b) -> VariableDef -> f VariableDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "snapshotName" f VariableDef VariableDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "snapshotName" -> (a -> f b) -> VariableDef -> f VariableDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "variableName" f VariableDef VariableDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "variableName" -> (a -> f b) -> VariableDef -> f VariableDef

                              fullName :: forall f s t a b. HasLens "fullName" f s t a b => LensLike f s t a b Source #

                              fullShape :: forall f s t a b. HasLens "fullShape" f s t a b => LensLike f s t a b Source #

                              initializerName :: forall f s t a b. HasLens "initializerName" f s t a b => LensLike f s t a b Source #

                              isResource :: forall f s t a b. HasLens "isResource" f s t a b => LensLike f s t a b Source #

                              maybe'saveSliceInfoDef :: forall f s t a b. HasLens "maybe'saveSliceInfoDef" f s t a b => LensLike f s t a b Source #

                              saveSliceInfoDef :: forall f s t a b. HasLens "saveSliceInfoDef" f s t a b => LensLike f s t a b Source #

                              snapshotName :: forall f s t a b. HasLens "snapshotName" f s t a b => LensLike f s t a b Source #

                              varOffset :: forall f s t a b. HasLens "varOffset" f s t a b => LensLike f s t a b Source #

                              varShape :: forall f s t a b. HasLens "varShape" f s t a b => LensLike f s t a b Source #

                              variableName :: forall f s t a b. HasLens "variableName" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Versions.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Versions.html new file mode 100644 index 0000000..98b3c48 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Versions.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Versions

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Framework.Versions

                              Documentation

                              data VersionDef Source #

                              Instances

                              Eq VersionDef Source # 
                              Ord VersionDef Source # 
                              Show VersionDef Source # 
                              Message VersionDef Source # 

                              Methods

                              descriptor :: MessageDescriptor VersionDef

                              Default VersionDef Source # 

                              Methods

                              def :: VersionDef

                              ((~) * a [Int32], (~) * b [Int32], Functor f) => HasLens "badConsumers" f VersionDef VersionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "badConsumers" -> (a -> f b) -> VersionDef -> f VersionDef

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "minConsumer" f VersionDef VersionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "minConsumer" -> (a -> f b) -> VersionDef -> f VersionDef

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "producer" f VersionDef VersionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "producer" -> (a -> f b) -> VersionDef -> f VersionDef

                              badConsumers :: forall f s t a b. HasLens "badConsumers" f s t a b => LensLike f s t a b Source #

                              minConsumer :: forall f s t a b. HasLens "minConsumer" f s t a b => LensLike f s t a b Source #

                              producer :: forall f s t a b. HasLens "producer" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Lib-Core-ErrorCodes.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Lib-Core-ErrorCodes.html new file mode 100644 index 0000000..15800c0 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Lib-Core-ErrorCodes.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Lib.Core.ErrorCodes

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Cluster.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Cluster.html new file mode 100644 index 0000000..64ab449 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Cluster.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.Cluster

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.Cluster

                              Documentation

                              data JobDef Source #

                              Constructors

                              JobDef 

                              Fields

                              Instances

                              Eq JobDef Source # 

                              Methods

                              (==) :: JobDef -> JobDef -> Bool #

                              (/=) :: JobDef -> JobDef -> Bool #

                              Ord JobDef Source # 
                              Show JobDef Source # 
                              Message JobDef Source # 

                              Methods

                              descriptor :: MessageDescriptor JobDef

                              Default JobDef Source # 

                              Methods

                              def :: JobDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f JobDef JobDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> JobDef -> f JobDef

                              ((~) * a (Map Int32 Text), (~) * b (Map Int32 Text), Functor f) => HasLens "tasks" f JobDef JobDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tasks" -> (a -> f b) -> JobDef -> f JobDef

                              data JobDef'TasksEntry Source #

                              job :: forall f s t a b. HasLens "job" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              tasks :: forall f s t a b. HasLens "tasks" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html index e8baf38..df31626 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Protobuf.Config

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.Config

                              Documentation

                              data ConfigProto

                              Instances

                              Eq ConfigProto 
                              Show ConfigProto 
                              Message ConfigProto 
                              Default ConfigProto 
                              HasField "allowSoftPlacement" ConfigProto ConfigProto 
                              HasField "deviceCount" ConfigProto ConfigProto 
                              HasField "deviceFilters" ConfigProto ConfigProto 
                              HasField "gpuOptions" ConfigProto ConfigProto 
                              HasField "graphOptions" ConfigProto ConfigProto 
                              HasField "interOpParallelismThreads" ConfigProto ConfigProto 
                              HasField "intraOpParallelismThreads" ConfigProto ConfigProto 
                              HasField "logDevicePlacement" ConfigProto ConfigProto 
                              HasField "maybe'gpuOptions" ConfigProto ConfigProto 
                              HasField "maybe'graphOptions" ConfigProto ConfigProto 
                              HasField "maybe'rpcOptions" ConfigProto ConfigProto 
                              HasField "operationTimeoutInMs" ConfigProto ConfigProto 
                              HasField "placementPeriod" ConfigProto ConfigProto 
                              HasField "rpcOptions" ConfigProto ConfigProto 
                              HasField "sessionInterOpThreadPool" ConfigProto ConfigProto 
                              HasField "usePerSessionThreads" ConfigProto ConfigProto 
                              type Field "allowSoftPlacement" ConfigProto = Bool 
                              type Field "deviceCount" ConfigProto = Map Text Int32 
                              type Field "deviceFilters" ConfigProto = [Text] 
                              type Field "gpuOptions" ConfigProto = GPUOptions 
                              type Field "graphOptions" ConfigProto = GraphOptions 
                              type Field "interOpParallelismThreads" ConfigProto = Int32 
                              type Field "intraOpParallelismThreads" ConfigProto = Int32 
                              type Field "logDevicePlacement" ConfigProto = Bool 
                              type Field "maybe'gpuOptions" ConfigProto = Maybe GPUOptions 
                              type Field "maybe'graphOptions" ConfigProto = Maybe GraphOptions 
                              type Field "maybe'rpcOptions" ConfigProto = Maybe RPCOptions 
                              type Field "operationTimeoutInMs" ConfigProto = Int64 
                              type Field "placementPeriod" ConfigProto = Int32 
                              type Field "rpcOptions" ConfigProto = RPCOptions 
                              type Field "sessionInterOpThreadPool" ConfigProto = [ThreadPoolOptionProto] 
                              type Field "usePerSessionThreads" ConfigProto = Bool 

                              data GPUOptions

                              Instances

                              Eq GPUOptions 
                              Show GPUOptions 
                              Message GPUOptions 
                              Default GPUOptions 
                              HasField "allocatorType" GPUOptions GPUOptions 
                              HasField "allowGrowth" GPUOptions GPUOptions 
                              HasField "deferredDeletionBytes" GPUOptions GPUOptions 
                              HasField "perProcessGpuMemoryFraction" GPUOptions GPUOptions 
                              HasField "visibleDeviceList" GPUOptions GPUOptions 
                              type Field "allocatorType" GPUOptions = Text 
                              type Field "allowGrowth" GPUOptions = Bool 
                              type Field "deferredDeletionBytes" GPUOptions = Int64 
                              type Field "perProcessGpuMemoryFraction" GPUOptions = Double 
                              type Field "visibleDeviceList" GPUOptions = Text 

                              data GraphOptions

                              Instances

                              Eq GraphOptions 
                              Show GraphOptions 
                              Message GraphOptions 
                              Default GraphOptions 
                              HasField "buildCostModel" GraphOptions GraphOptions 
                              HasField "buildCostModelAfter" GraphOptions GraphOptions 
                              HasField "enableBfloat16Sendrecv" GraphOptions GraphOptions 
                              HasField "enableRecvScheduling" GraphOptions GraphOptions 
                              HasField "inferShapes" GraphOptions GraphOptions 
                              HasField "maybe'optimizerOptions" GraphOptions GraphOptions 
                              HasField "optimizerOptions" GraphOptions GraphOptions 
                              HasField "placePrunedGraph" GraphOptions GraphOptions 
                              HasField "timelineStep" GraphOptions GraphOptions 
                              type Field "buildCostModel" GraphOptions = Int64 
                              type Field "buildCostModelAfter" GraphOptions = Int64 
                              type Field "enableBfloat16Sendrecv" GraphOptions = Bool 
                              type Field "enableRecvScheduling" GraphOptions = Bool 
                              type Field "inferShapes" GraphOptions = Bool 
                              type Field "maybe'optimizerOptions" GraphOptions = Maybe OptimizerOptions 
                              type Field "optimizerOptions" GraphOptions = OptimizerOptions 
                              type Field "placePrunedGraph" GraphOptions = Bool 
                              type Field "timelineStep" GraphOptions = Int32 

                              data OptimizerOptions

                              Instances

                              Eq OptimizerOptions 
                              Show OptimizerOptions 
                              Message OptimizerOptions 
                              Default OptimizerOptions 
                              HasField "doCommonSubexpressionElimination" OptimizerOptions OptimizerOptions 
                              HasField "doConstantFolding" OptimizerOptions OptimizerOptions 
                              HasField "doFunctionInlining" OptimizerOptions OptimizerOptions 
                              HasField "globalJitLevel" OptimizerOptions OptimizerOptions 
                              HasField "optLevel" OptimizerOptions OptimizerOptions 
                              type Field "doCommonSubexpressionElimination" OptimizerOptions = Bool 
                              type Field "doConstantFolding" OptimizerOptions = Bool 
                              type Field "doFunctionInlining" OptimizerOptions = Bool 
                              type Field "globalJitLevel" OptimizerOptions = OptimizerOptions'GlobalJitLevel 
                              type Field "optLevel" OptimizerOptions = OptimizerOptions'Level 

                              data RPCOptions

                              Instances

                              Eq RPCOptions 
                              Show RPCOptions 
                              Message RPCOptions 
                              Default RPCOptions 
                              HasField "useRpcForInprocessMaster" RPCOptions RPCOptions 
                              type Field "useRpcForInprocessMaster" RPCOptions = Bool 

                              data RunMetadata

                              Constructors

                              RunMetadata 

                              Instances

                              Eq RunMetadata 
                              Show RunMetadata 
                              Message RunMetadata 
                              Default RunMetadata 
                              HasField "costGraph" RunMetadata RunMetadata 
                              HasField "maybe'costGraph" RunMetadata RunMetadata 
                              HasField "maybe'stepStats" RunMetadata RunMetadata 
                              HasField "partitionGraphs" RunMetadata RunMetadata 
                              HasField "stepStats" RunMetadata RunMetadata 
                              type Field "costGraph" RunMetadata 
                              type Field "maybe'costGraph" RunMetadata 
                              type Field "maybe'stepStats" RunMetadata 
                              type Field "partitionGraphs" RunMetadata = [GraphDef] 
                              type Field "stepStats" RunMetadata 

                              data RunOptions

                              Instances

                              Eq RunOptions 
                              Show RunOptions 
                              Message RunOptions 
                              Default RunOptions 
                              HasField "debugOptions" RunOptions RunOptions 
                              HasField "interOpThreadPool" RunOptions RunOptions 
                              HasField "maybe'debugOptions" RunOptions RunOptions 
                              HasField "outputPartitionGraphs" RunOptions RunOptions 
                              HasField "timeoutInMs" RunOptions RunOptions 
                              HasField "traceLevel" RunOptions RunOptions 
                              type Field "debugOptions" RunOptions 
                              type Field "interOpThreadPool" RunOptions = Int32 
                              type Field "maybe'debugOptions" RunOptions 
                              type Field "outputPartitionGraphs" RunOptions = Bool 
                              type Field "timeoutInMs" RunOptions = Int64 
                              type Field "traceLevel" RunOptions = RunOptions'TraceLevel 

                              allocatorType :: forall msg msg'. HasField "allocatorType" msg msg' => Lens msg msg' (Field "allocatorType" msg) (Field "allocatorType" msg')

                              allowGrowth :: forall msg msg'. HasField "allowGrowth" msg msg' => Lens msg msg' (Field "allowGrowth" msg) (Field "allowGrowth" msg')

                              allowSoftPlacement :: forall msg msg'. HasField "allowSoftPlacement" msg msg' => Lens msg msg' (Field "allowSoftPlacement" msg) (Field "allowSoftPlacement" msg')

                              buildCostModel :: forall msg msg'. HasField "buildCostModel" msg msg' => Lens msg msg' (Field "buildCostModel" msg) (Field "buildCostModel" msg')

                              buildCostModelAfter :: forall msg msg'. HasField "buildCostModelAfter" msg msg' => Lens msg msg' (Field "buildCostModelAfter" msg) (Field "buildCostModelAfter" msg')

                              costGraph :: forall msg msg'. HasField "costGraph" msg msg' => Lens msg msg' (Field "costGraph" msg) (Field "costGraph" msg')

                              debugOptions :: forall msg msg'. HasField "debugOptions" msg msg' => Lens msg msg' (Field "debugOptions" msg) (Field "debugOptions" msg')

                              deferredDeletionBytes :: forall msg msg'. HasField "deferredDeletionBytes" msg msg' => Lens msg msg' (Field "deferredDeletionBytes" msg) (Field "deferredDeletionBytes" msg')

                              deviceCount :: forall msg msg'. HasField "deviceCount" msg msg' => Lens msg msg' (Field "deviceCount" msg) (Field "deviceCount" msg')

                              deviceFilters :: forall msg msg'. HasField "deviceFilters" msg msg' => Lens msg msg' (Field "deviceFilters" msg) (Field "deviceFilters" msg')

                              doCommonSubexpressionElimination :: forall msg msg'. HasField "doCommonSubexpressionElimination" msg msg' => Lens msg msg' (Field "doCommonSubexpressionElimination" msg) (Field "doCommonSubexpressionElimination" msg')

                              doConstantFolding :: forall msg msg'. HasField "doConstantFolding" msg msg' => Lens msg msg' (Field "doConstantFolding" msg) (Field "doConstantFolding" msg')

                              doFunctionInlining :: forall msg msg'. HasField "doFunctionInlining" msg msg' => Lens msg msg' (Field "doFunctionInlining" msg) (Field "doFunctionInlining" msg')

                              enableBfloat16Sendrecv :: forall msg msg'. HasField "enableBfloat16Sendrecv" msg msg' => Lens msg msg' (Field "enableBfloat16Sendrecv" msg) (Field "enableBfloat16Sendrecv" msg')

                              enableRecvScheduling :: forall msg msg'. HasField "enableRecvScheduling" msg msg' => Lens msg msg' (Field "enableRecvScheduling" msg) (Field "enableRecvScheduling" msg')

                              globalJitLevel :: forall msg msg'. HasField "globalJitLevel" msg msg' => Lens msg msg' (Field "globalJitLevel" msg) (Field "globalJitLevel" msg')

                              gpuOptions :: forall msg msg'. HasField "gpuOptions" msg msg' => Lens msg msg' (Field "gpuOptions" msg) (Field "gpuOptions" msg')

                              graphOptions :: forall msg msg'. HasField "graphOptions" msg msg' => Lens msg msg' (Field "graphOptions" msg) (Field "graphOptions" msg')

                              inferShapes :: forall msg msg'. HasField "inferShapes" msg msg' => Lens msg msg' (Field "inferShapes" msg) (Field "inferShapes" msg')

                              interOpParallelismThreads :: forall msg msg'. HasField "interOpParallelismThreads" msg msg' => Lens msg msg' (Field "interOpParallelismThreads" msg) (Field "interOpParallelismThreads" msg')

                              interOpThreadPool :: forall msg msg'. HasField "interOpThreadPool" msg msg' => Lens msg msg' (Field "interOpThreadPool" msg) (Field "interOpThreadPool" msg')

                              intraOpParallelismThreads :: forall msg msg'. HasField "intraOpParallelismThreads" msg msg' => Lens msg msg' (Field "intraOpParallelismThreads" msg) (Field "intraOpParallelismThreads" msg')

                              key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg')

                              logDevicePlacement :: forall msg msg'. HasField "logDevicePlacement" msg msg' => Lens msg msg' (Field "logDevicePlacement" msg) (Field "logDevicePlacement" msg')

                              maybe'costGraph :: forall msg msg'. HasField "maybe'costGraph" msg msg' => Lens msg msg' (Field "maybe'costGraph" msg) (Field "maybe'costGraph" msg')

                              maybe'debugOptions :: forall msg msg'. HasField "maybe'debugOptions" msg msg' => Lens msg msg' (Field "maybe'debugOptions" msg) (Field "maybe'debugOptions" msg')

                              maybe'gpuOptions :: forall msg msg'. HasField "maybe'gpuOptions" msg msg' => Lens msg msg' (Field "maybe'gpuOptions" msg) (Field "maybe'gpuOptions" msg')

                              maybe'graphOptions :: forall msg msg'. HasField "maybe'graphOptions" msg msg' => Lens msg msg' (Field "maybe'graphOptions" msg) (Field "maybe'graphOptions" msg')

                              maybe'optimizerOptions :: forall msg msg'. HasField "maybe'optimizerOptions" msg msg' => Lens msg msg' (Field "maybe'optimizerOptions" msg) (Field "maybe'optimizerOptions" msg')

                              maybe'rpcOptions :: forall msg msg'. HasField "maybe'rpcOptions" msg msg' => Lens msg msg' (Field "maybe'rpcOptions" msg) (Field "maybe'rpcOptions" msg')

                              maybe'stepStats :: forall msg msg'. HasField "maybe'stepStats" msg msg' => Lens msg msg' (Field "maybe'stepStats" msg) (Field "maybe'stepStats" msg')

                              numThreads :: forall msg msg'. HasField "numThreads" msg msg' => Lens msg msg' (Field "numThreads" msg) (Field "numThreads" msg')

                              operationTimeoutInMs :: forall msg msg'. HasField "operationTimeoutInMs" msg msg' => Lens msg msg' (Field "operationTimeoutInMs" msg) (Field "operationTimeoutInMs" msg')

                              optLevel :: forall msg msg'. HasField "optLevel" msg msg' => Lens msg msg' (Field "optLevel" msg) (Field "optLevel" msg')

                              optimizerOptions :: forall msg msg'. HasField "optimizerOptions" msg msg' => Lens msg msg' (Field "optimizerOptions" msg) (Field "optimizerOptions" msg')

                              outputPartitionGraphs :: forall msg msg'. HasField "outputPartitionGraphs" msg msg' => Lens msg msg' (Field "outputPartitionGraphs" msg) (Field "outputPartitionGraphs" msg')

                              partitionGraphs :: forall msg msg'. HasField "partitionGraphs" msg msg' => Lens msg msg' (Field "partitionGraphs" msg) (Field "partitionGraphs" msg')

                              perProcessGpuMemoryFraction :: forall msg msg'. HasField "perProcessGpuMemoryFraction" msg msg' => Lens msg msg' (Field "perProcessGpuMemoryFraction" msg) (Field "perProcessGpuMemoryFraction" msg')

                              placePrunedGraph :: forall msg msg'. HasField "placePrunedGraph" msg msg' => Lens msg msg' (Field "placePrunedGraph" msg) (Field "placePrunedGraph" msg')

                              placementPeriod :: forall msg msg'. HasField "placementPeriod" msg msg' => Lens msg msg' (Field "placementPeriod" msg) (Field "placementPeriod" msg')

                              rpcOptions :: forall msg msg'. HasField "rpcOptions" msg msg' => Lens msg msg' (Field "rpcOptions" msg) (Field "rpcOptions" msg')

                              sessionInterOpThreadPool :: forall msg msg'. HasField "sessionInterOpThreadPool" msg msg' => Lens msg msg' (Field "sessionInterOpThreadPool" msg) (Field "sessionInterOpThreadPool" msg')

                              stepStats :: forall msg msg'. HasField "stepStats" msg msg' => Lens msg msg' (Field "stepStats" msg) (Field "stepStats" msg')

                              timelineStep :: forall msg msg'. HasField "timelineStep" msg msg' => Lens msg msg' (Field "timelineStep" msg) (Field "timelineStep" msg')

                              timeoutInMs :: forall msg msg'. HasField "timeoutInMs" msg msg' => Lens msg msg' (Field "timeoutInMs" msg) (Field "timeoutInMs" msg')

                              traceLevel :: forall msg msg'. HasField "traceLevel" msg msg' => Lens msg msg' (Field "traceLevel" msg) (Field "traceLevel" msg')

                              usePerSessionThreads :: forall msg msg'. HasField "usePerSessionThreads" msg msg' => Lens msg msg' (Field "usePerSessionThreads" msg) (Field "usePerSessionThreads" msg')

                              useRpcForInprocessMaster :: forall msg msg'. HasField "useRpcForInprocessMaster" msg msg' => Lens msg msg' (Field "useRpcForInprocessMaster" msg) (Field "useRpcForInprocessMaster" msg')

                              value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')

                              visibleDeviceList :: forall msg msg'. HasField "visibleDeviceList" msg msg' => Lens msg msg' (Field "visibleDeviceList" msg) (Field "visibleDeviceList" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.Config

                              Documentation

                              data ConfigProto Source #

                              Instances

                              Eq ConfigProto Source # 
                              Ord ConfigProto Source # 
                              Show ConfigProto Source # 
                              Message ConfigProto Source # 

                              Methods

                              descriptor :: MessageDescriptor ConfigProto

                              Default ConfigProto Source # 

                              Methods

                              def :: ConfigProto

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "allowSoftPlacement" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allowSoftPlacement" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a ClusterDef, (~) * b ClusterDef, Functor f) => HasLens "clusterDef" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "clusterDef" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a (Map Text Int32), (~) * b (Map Text Int32), Functor f) => HasLens "deviceCount" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deviceCount" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "deviceFilters" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deviceFilters" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a GPUOptions, (~) * b GPUOptions, Functor f) => HasLens "gpuOptions" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "gpuOptions" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a GraphOptions, (~) * b GraphOptions, Functor f) => HasLens "graphOptions" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "graphOptions" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "interOpParallelismThreads" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "interOpParallelismThreads" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "intraOpParallelismThreads" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "intraOpParallelismThreads" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "logDevicePlacement" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "logDevicePlacement" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a (Maybe ClusterDef), (~) * b (Maybe ClusterDef), Functor f) => HasLens "maybe'clusterDef" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'clusterDef" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a (Maybe GPUOptions), (~) * b (Maybe GPUOptions), Functor f) => HasLens "maybe'gpuOptions" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'gpuOptions" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a (Maybe GraphOptions), (~) * b (Maybe GraphOptions), Functor f) => HasLens "maybe'graphOptions" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'graphOptions" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a (Maybe RPCOptions), (~) * b (Maybe RPCOptions), Functor f) => HasLens "maybe'rpcOptions" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'rpcOptions" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "operationTimeoutInMs" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "operationTimeoutInMs" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "placementPeriod" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "placementPeriod" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a RPCOptions, (~) * b RPCOptions, Functor f) => HasLens "rpcOptions" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "rpcOptions" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a [ThreadPoolOptionProto], (~) * b [ThreadPoolOptionProto], Functor f) => HasLens "sessionInterOpThreadPool" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "sessionInterOpThreadPool" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "usePerSessionThreads" f ConfigProto ConfigProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "usePerSessionThreads" -> (a -> f b) -> ConfigProto -> f ConfigProto

                              data ConfigProto'DeviceCountEntry Source #

                              Instances

                              Eq ConfigProto'DeviceCountEntry Source # 
                              Ord ConfigProto'DeviceCountEntry Source # 
                              Show ConfigProto'DeviceCountEntry Source # 
                              Message ConfigProto'DeviceCountEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor ConfigProto'DeviceCountEntry

                              Default ConfigProto'DeviceCountEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f ConfigProto'DeviceCountEntry ConfigProto'DeviceCountEntry a b Source # 
                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "value" f ConfigProto'DeviceCountEntry ConfigProto'DeviceCountEntry a b Source # 

                              data GPUOptions Source #

                              Instances

                              Eq GPUOptions Source # 
                              Ord GPUOptions Source # 
                              Show GPUOptions Source # 
                              Message GPUOptions Source # 

                              Methods

                              descriptor :: MessageDescriptor GPUOptions

                              Default GPUOptions Source # 

                              Methods

                              def :: GPUOptions

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "allocatorType" f GPUOptions GPUOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allocatorType" -> (a -> f b) -> GPUOptions -> f GPUOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "allowGrowth" f GPUOptions GPUOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "allowGrowth" -> (a -> f b) -> GPUOptions -> f GPUOptions

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "deferredDeletionBytes" f GPUOptions GPUOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deferredDeletionBytes" -> (a -> f b) -> GPUOptions -> f GPUOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "forceGpuCompatible" f GPUOptions GPUOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "forceGpuCompatible" -> (a -> f b) -> GPUOptions -> f GPUOptions

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "perProcessGpuMemoryFraction" f GPUOptions GPUOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "perProcessGpuMemoryFraction" -> (a -> f b) -> GPUOptions -> f GPUOptions

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "pollingActiveDelayUsecs" f GPUOptions GPUOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "pollingActiveDelayUsecs" -> (a -> f b) -> GPUOptions -> f GPUOptions

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "pollingInactiveDelayMsecs" f GPUOptions GPUOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "pollingInactiveDelayMsecs" -> (a -> f b) -> GPUOptions -> f GPUOptions

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "visibleDeviceList" f GPUOptions GPUOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "visibleDeviceList" -> (a -> f b) -> GPUOptions -> f GPUOptions

                              data GraphOptions Source #

                              Instances

                              Eq GraphOptions Source # 
                              Ord GraphOptions Source # 
                              Show GraphOptions Source # 
                              Message GraphOptions Source # 

                              Methods

                              descriptor :: MessageDescriptor GraphOptions

                              Default GraphOptions Source # 

                              Methods

                              def :: GraphOptions

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "buildCostModel" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "buildCostModel" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "buildCostModelAfter" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "buildCostModelAfter" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "enableBfloat16Sendrecv" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "enableBfloat16Sendrecv" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "enableRecvScheduling" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "enableRecvScheduling" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "inferShapes" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "inferShapes" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a (Maybe OptimizerOptions), (~) * b (Maybe OptimizerOptions), Functor f) => HasLens "maybe'optimizerOptions" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'optimizerOptions" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a (Maybe RewriterConfig), (~) * b (Maybe RewriterConfig), Functor f) => HasLens "maybe'rewriteOptions" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'rewriteOptions" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a OptimizerOptions, (~) * b OptimizerOptions, Functor f) => HasLens "optimizerOptions" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "optimizerOptions" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "placePrunedGraph" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "placePrunedGraph" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a RewriterConfig, (~) * b RewriterConfig, Functor f) => HasLens "rewriteOptions" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "rewriteOptions" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "timelineStep" f GraphOptions GraphOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "timelineStep" -> (a -> f b) -> GraphOptions -> f GraphOptions

                              data OptimizerOptions Source #

                              Instances

                              Eq OptimizerOptions Source # 
                              Ord OptimizerOptions Source # 
                              Show OptimizerOptions Source # 
                              Message OptimizerOptions Source # 

                              Methods

                              descriptor :: MessageDescriptor OptimizerOptions

                              Default OptimizerOptions Source # 
                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "doCommonSubexpressionElimination" f OptimizerOptions OptimizerOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "doCommonSubexpressionElimination" -> (a -> f b) -> OptimizerOptions -> f OptimizerOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "doConstantFolding" f OptimizerOptions OptimizerOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "doConstantFolding" -> (a -> f b) -> OptimizerOptions -> f OptimizerOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "doFunctionInlining" f OptimizerOptions OptimizerOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "doFunctionInlining" -> (a -> f b) -> OptimizerOptions -> f OptimizerOptions

                              ((~) * a OptimizerOptions'GlobalJitLevel, (~) * b OptimizerOptions'GlobalJitLevel, Functor f) => HasLens "globalJitLevel" f OptimizerOptions OptimizerOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "globalJitLevel" -> (a -> f b) -> OptimizerOptions -> f OptimizerOptions

                              ((~) * a OptimizerOptions'Level, (~) * b OptimizerOptions'Level, Functor f) => HasLens "optLevel" f OptimizerOptions OptimizerOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "optLevel" -> (a -> f b) -> OptimizerOptions -> f OptimizerOptions

                              data OptimizerOptions'GlobalJitLevel Source #

                              Instances

                              Bounded OptimizerOptions'GlobalJitLevel Source # 
                              Enum OptimizerOptions'GlobalJitLevel Source # 
                              Eq OptimizerOptions'GlobalJitLevel Source # 
                              Ord OptimizerOptions'GlobalJitLevel Source # 
                              Show OptimizerOptions'GlobalJitLevel Source # 
                              MessageEnum OptimizerOptions'GlobalJitLevel Source # 
                              FieldDefault OptimizerOptions'GlobalJitLevel Source # 
                              Default OptimizerOptions'GlobalJitLevel Source # 

                              data OptimizerOptions'Level Source #

                              Instances

                              Bounded OptimizerOptions'Level Source # 
                              Enum OptimizerOptions'Level Source # 
                              Eq OptimizerOptions'Level Source # 
                              Ord OptimizerOptions'Level Source # 
                              Show OptimizerOptions'Level Source # 
                              MessageEnum OptimizerOptions'Level Source # 
                              FieldDefault OptimizerOptions'Level Source # 
                              Default OptimizerOptions'Level Source # 

                              data RPCOptions Source #

                              Instances

                              Eq RPCOptions Source # 
                              Ord RPCOptions Source # 
                              Show RPCOptions Source # 
                              Message RPCOptions Source # 

                              Methods

                              descriptor :: MessageDescriptor RPCOptions

                              Default RPCOptions Source # 

                              Methods

                              def :: RPCOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "useRpcForInprocessMaster" f RPCOptions RPCOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "useRpcForInprocessMaster" -> (a -> f b) -> RPCOptions -> f RPCOptions

                              data RunMetadata Source #

                              Instances

                              Eq RunMetadata Source # 
                              Ord RunMetadata Source # 
                              Show RunMetadata Source # 
                              Message RunMetadata Source # 

                              Methods

                              descriptor :: MessageDescriptor RunMetadata

                              Default RunMetadata Source # 

                              Methods

                              def :: RunMetadata

                              ((~) * a CostGraphDef, (~) * b CostGraphDef, Functor f) => HasLens "costGraph" f RunMetadata RunMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "costGraph" -> (a -> f b) -> RunMetadata -> f RunMetadata

                              ((~) * a (Maybe CostGraphDef), (~) * b (Maybe CostGraphDef), Functor f) => HasLens "maybe'costGraph" f RunMetadata RunMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'costGraph" -> (a -> f b) -> RunMetadata -> f RunMetadata

                              ((~) * a (Maybe StepStats), (~) * b (Maybe StepStats), Functor f) => HasLens "maybe'stepStats" f RunMetadata RunMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'stepStats" -> (a -> f b) -> RunMetadata -> f RunMetadata

                              ((~) * a [GraphDef], (~) * b [GraphDef], Functor f) => HasLens "partitionGraphs" f RunMetadata RunMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "partitionGraphs" -> (a -> f b) -> RunMetadata -> f RunMetadata

                              ((~) * a StepStats, (~) * b StepStats, Functor f) => HasLens "stepStats" f RunMetadata RunMetadata a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "stepStats" -> (a -> f b) -> RunMetadata -> f RunMetadata

                              data RunOptions Source #

                              Instances

                              Eq RunOptions Source # 
                              Ord RunOptions Source # 
                              Show RunOptions Source # 
                              Message RunOptions Source # 

                              Methods

                              descriptor :: MessageDescriptor RunOptions

                              Default RunOptions Source # 

                              Methods

                              def :: RunOptions

                              ((~) * a DebugOptions, (~) * b DebugOptions, Functor f) => HasLens "debugOptions" f RunOptions RunOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "debugOptions" -> (a -> f b) -> RunOptions -> f RunOptions

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "interOpThreadPool" f RunOptions RunOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "interOpThreadPool" -> (a -> f b) -> RunOptions -> f RunOptions

                              ((~) * a (Maybe DebugOptions), (~) * b (Maybe DebugOptions), Functor f) => HasLens "maybe'debugOptions" f RunOptions RunOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'debugOptions" -> (a -> f b) -> RunOptions -> f RunOptions

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "outputPartitionGraphs" f RunOptions RunOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "outputPartitionGraphs" -> (a -> f b) -> RunOptions -> f RunOptions

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "timeoutInMs" f RunOptions RunOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "timeoutInMs" -> (a -> f b) -> RunOptions -> f RunOptions

                              ((~) * a RunOptions'TraceLevel, (~) * b RunOptions'TraceLevel, Functor f) => HasLens "traceLevel" f RunOptions RunOptions a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "traceLevel" -> (a -> f b) -> RunOptions -> f RunOptions

                              data RunOptions'TraceLevel Source #

                              Instances

                              Bounded RunOptions'TraceLevel Source # 
                              Enum RunOptions'TraceLevel Source # 
                              Eq RunOptions'TraceLevel Source # 
                              Ord RunOptions'TraceLevel Source # 
                              Show RunOptions'TraceLevel Source # 
                              MessageEnum RunOptions'TraceLevel Source # 
                              FieldDefault RunOptions'TraceLevel Source # 
                              Default RunOptions'TraceLevel Source # 

                              data ThreadPoolOptionProto Source #

                              Instances

                              Eq ThreadPoolOptionProto Source # 
                              Ord ThreadPoolOptionProto Source # 
                              Show ThreadPoolOptionProto Source # 
                              Message ThreadPoolOptionProto Source # 

                              Methods

                              descriptor :: MessageDescriptor ThreadPoolOptionProto

                              Default ThreadPoolOptionProto Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "globalName" f ThreadPoolOptionProto ThreadPoolOptionProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "globalName" -> (a -> f b) -> ThreadPoolOptionProto -> f ThreadPoolOptionProto

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "numThreads" f ThreadPoolOptionProto ThreadPoolOptionProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "numThreads" -> (a -> f b) -> ThreadPoolOptionProto -> f ThreadPoolOptionProto

                              allocatorType :: forall f s t a b. HasLens "allocatorType" f s t a b => LensLike f s t a b Source #

                              allowGrowth :: forall f s t a b. HasLens "allowGrowth" f s t a b => LensLike f s t a b Source #

                              allowSoftPlacement :: forall f s t a b. HasLens "allowSoftPlacement" f s t a b => LensLike f s t a b Source #

                              buildCostModel :: forall f s t a b. HasLens "buildCostModel" f s t a b => LensLike f s t a b Source #

                              buildCostModelAfter :: forall f s t a b. HasLens "buildCostModelAfter" f s t a b => LensLike f s t a b Source #

                              clusterDef :: forall f s t a b. HasLens "clusterDef" f s t a b => LensLike f s t a b Source #

                              costGraph :: forall f s t a b. HasLens "costGraph" f s t a b => LensLike f s t a b Source #

                              debugOptions :: forall f s t a b. HasLens "debugOptions" f s t a b => LensLike f s t a b Source #

                              deferredDeletionBytes :: forall f s t a b. HasLens "deferredDeletionBytes" f s t a b => LensLike f s t a b Source #

                              deviceCount :: forall f s t a b. HasLens "deviceCount" f s t a b => LensLike f s t a b Source #

                              deviceFilters :: forall f s t a b. HasLens "deviceFilters" f s t a b => LensLike f s t a b Source #

                              doCommonSubexpressionElimination :: forall f s t a b. HasLens "doCommonSubexpressionElimination" f s t a b => LensLike f s t a b Source #

                              doConstantFolding :: forall f s t a b. HasLens "doConstantFolding" f s t a b => LensLike f s t a b Source #

                              doFunctionInlining :: forall f s t a b. HasLens "doFunctionInlining" f s t a b => LensLike f s t a b Source #

                              enableBfloat16Sendrecv :: forall f s t a b. HasLens "enableBfloat16Sendrecv" f s t a b => LensLike f s t a b Source #

                              enableRecvScheduling :: forall f s t a b. HasLens "enableRecvScheduling" f s t a b => LensLike f s t a b Source #

                              forceGpuCompatible :: forall f s t a b. HasLens "forceGpuCompatible" f s t a b => LensLike f s t a b Source #

                              globalJitLevel :: forall f s t a b. HasLens "globalJitLevel" f s t a b => LensLike f s t a b Source #

                              globalName :: forall f s t a b. HasLens "globalName" f s t a b => LensLike f s t a b Source #

                              gpuOptions :: forall f s t a b. HasLens "gpuOptions" f s t a b => LensLike f s t a b Source #

                              graphOptions :: forall f s t a b. HasLens "graphOptions" f s t a b => LensLike f s t a b Source #

                              inferShapes :: forall f s t a b. HasLens "inferShapes" f s t a b => LensLike f s t a b Source #

                              interOpParallelismThreads :: forall f s t a b. HasLens "interOpParallelismThreads" f s t a b => LensLike f s t a b Source #

                              interOpThreadPool :: forall f s t a b. HasLens "interOpThreadPool" f s t a b => LensLike f s t a b Source #

                              intraOpParallelismThreads :: forall f s t a b. HasLens "intraOpParallelismThreads" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              logDevicePlacement :: forall f s t a b. HasLens "logDevicePlacement" f s t a b => LensLike f s t a b Source #

                              maybe'clusterDef :: forall f s t a b. HasLens "maybe'clusterDef" f s t a b => LensLike f s t a b Source #

                              maybe'costGraph :: forall f s t a b. HasLens "maybe'costGraph" f s t a b => LensLike f s t a b Source #

                              maybe'debugOptions :: forall f s t a b. HasLens "maybe'debugOptions" f s t a b => LensLike f s t a b Source #

                              maybe'gpuOptions :: forall f s t a b. HasLens "maybe'gpuOptions" f s t a b => LensLike f s t a b Source #

                              maybe'graphOptions :: forall f s t a b. HasLens "maybe'graphOptions" f s t a b => LensLike f s t a b Source #

                              maybe'optimizerOptions :: forall f s t a b. HasLens "maybe'optimizerOptions" f s t a b => LensLike f s t a b Source #

                              maybe'rewriteOptions :: forall f s t a b. HasLens "maybe'rewriteOptions" f s t a b => LensLike f s t a b Source #

                              maybe'rpcOptions :: forall f s t a b. HasLens "maybe'rpcOptions" f s t a b => LensLike f s t a b Source #

                              maybe'stepStats :: forall f s t a b. HasLens "maybe'stepStats" f s t a b => LensLike f s t a b Source #

                              numThreads :: forall f s t a b. HasLens "numThreads" f s t a b => LensLike f s t a b Source #

                              operationTimeoutInMs :: forall f s t a b. HasLens "operationTimeoutInMs" f s t a b => LensLike f s t a b Source #

                              optLevel :: forall f s t a b. HasLens "optLevel" f s t a b => LensLike f s t a b Source #

                              optimizerOptions :: forall f s t a b. HasLens "optimizerOptions" f s t a b => LensLike f s t a b Source #

                              outputPartitionGraphs :: forall f s t a b. HasLens "outputPartitionGraphs" f s t a b => LensLike f s t a b Source #

                              partitionGraphs :: forall f s t a b. HasLens "partitionGraphs" f s t a b => LensLike f s t a b Source #

                              perProcessGpuMemoryFraction :: forall f s t a b. HasLens "perProcessGpuMemoryFraction" f s t a b => LensLike f s t a b Source #

                              placePrunedGraph :: forall f s t a b. HasLens "placePrunedGraph" f s t a b => LensLike f s t a b Source #

                              placementPeriod :: forall f s t a b. HasLens "placementPeriod" f s t a b => LensLike f s t a b Source #

                              pollingActiveDelayUsecs :: forall f s t a b. HasLens "pollingActiveDelayUsecs" f s t a b => LensLike f s t a b Source #

                              pollingInactiveDelayMsecs :: forall f s t a b. HasLens "pollingInactiveDelayMsecs" f s t a b => LensLike f s t a b Source #

                              rewriteOptions :: forall f s t a b. HasLens "rewriteOptions" f s t a b => LensLike f s t a b Source #

                              rpcOptions :: forall f s t a b. HasLens "rpcOptions" f s t a b => LensLike f s t a b Source #

                              sessionInterOpThreadPool :: forall f s t a b. HasLens "sessionInterOpThreadPool" f s t a b => LensLike f s t a b Source #

                              stepStats :: forall f s t a b. HasLens "stepStats" f s t a b => LensLike f s t a b Source #

                              timelineStep :: forall f s t a b. HasLens "timelineStep" f s t a b => LensLike f s t a b Source #

                              timeoutInMs :: forall f s t a b. HasLens "timeoutInMs" f s t a b => LensLike f s t a b Source #

                              traceLevel :: forall f s t a b. HasLens "traceLevel" f s t a b => LensLike f s t a b Source #

                              usePerSessionThreads :: forall f s t a b. HasLens "usePerSessionThreads" f s t a b => LensLike f s t a b Source #

                              useRpcForInprocessMaster :: forall f s t a b. HasLens "useRpcForInprocessMaster" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              visibleDeviceList :: forall f s t a b. HasLens "visibleDeviceList" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-ControlFlow.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-ControlFlow.html new file mode 100644 index 0000000..f9ef1f5 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-ControlFlow.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.ControlFlow

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.ControlFlow

                              Documentation

                              data CondContextDef Source #

                              Instances

                              Eq CondContextDef Source # 
                              Ord CondContextDef Source # 
                              Show CondContextDef Source # 
                              Message CondContextDef Source # 

                              Methods

                              descriptor :: MessageDescriptor CondContextDef

                              Default CondContextDef Source # 
                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "branch" f CondContextDef CondContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "branch" -> (a -> f b) -> CondContextDef -> f CondContextDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "contextName" f CondContextDef CondContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "contextName" -> (a -> f b) -> CondContextDef -> f CondContextDef

                              ((~) * a (Maybe ValuesDef), (~) * b (Maybe ValuesDef), Functor f) => HasLens "maybe'valuesDef" f CondContextDef CondContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'valuesDef" -> (a -> f b) -> CondContextDef -> f CondContextDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "pivotName" f CondContextDef CondContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "pivotName" -> (a -> f b) -> CondContextDef -> f CondContextDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "predName" f CondContextDef CondContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "predName" -> (a -> f b) -> CondContextDef -> f CondContextDef

                              ((~) * a ValuesDef, (~) * b ValuesDef, Functor f) => HasLens "valuesDef" f CondContextDef CondContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "valuesDef" -> (a -> f b) -> CondContextDef -> f CondContextDef

                              data ValuesDef Source #

                              Constructors

                              ValuesDef 

                              Fields

                              Instances

                              Eq ValuesDef Source # 
                              Ord ValuesDef Source # 
                              Show ValuesDef Source # 
                              Message ValuesDef Source # 

                              Methods

                              descriptor :: MessageDescriptor ValuesDef

                              Default ValuesDef Source # 

                              Methods

                              def :: ValuesDef

                              ((~) * a (Map Text Text), (~) * b (Map Text Text), Functor f) => HasLens "externalValues" f ValuesDef ValuesDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "externalValues" -> (a -> f b) -> ValuesDef -> f ValuesDef

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "values" f ValuesDef ValuesDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "values" -> (a -> f b) -> ValuesDef -> f ValuesDef

                              data ValuesDef'ExternalValuesEntry Source #

                              Instances

                              Eq ValuesDef'ExternalValuesEntry Source # 
                              Ord ValuesDef'ExternalValuesEntry Source # 
                              Show ValuesDef'ExternalValuesEntry Source # 
                              Message ValuesDef'ExternalValuesEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor ValuesDef'ExternalValuesEntry

                              Default ValuesDef'ExternalValuesEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f ValuesDef'ExternalValuesEntry ValuesDef'ExternalValuesEntry a b Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "value" f ValuesDef'ExternalValuesEntry ValuesDef'ExternalValuesEntry a b Source # 

                              data WhileContextDef Source #

                              Instances

                              Eq WhileContextDef Source # 
                              Ord WhileContextDef Source # 
                              Show WhileContextDef Source # 
                              Message WhileContextDef Source # 

                              Methods

                              descriptor :: MessageDescriptor WhileContextDef

                              Default WhileContextDef Source # 
                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "backProp" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "backProp" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "contextName" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "contextName" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "loopEnterNames" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "loopEnterNames" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "loopExitNames" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "loopExitNames" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a (Maybe ValuesDef), (~) * b (Maybe ValuesDef), Functor f) => HasLens "maybe'valuesDef" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'valuesDef" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "parallelIterations" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "parallelIterations" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "pivotForBodyName" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "pivotForBodyName" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "pivotForPredName" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "pivotForPredName" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "pivotName" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "pivotName" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "swapMemory" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "swapMemory" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              ((~) * a ValuesDef, (~) * b ValuesDef, Functor f) => HasLens "valuesDef" f WhileContextDef WhileContextDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "valuesDef" -> (a -> f b) -> WhileContextDef -> f WhileContextDef

                              backProp :: forall f s t a b. HasLens "backProp" f s t a b => LensLike f s t a b Source #

                              branch :: forall f s t a b. HasLens "branch" f s t a b => LensLike f s t a b Source #

                              contextName :: forall f s t a b. HasLens "contextName" f s t a b => LensLike f s t a b Source #

                              externalValues :: forall f s t a b. HasLens "externalValues" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              loopEnterNames :: forall f s t a b. HasLens "loopEnterNames" f s t a b => LensLike f s t a b Source #

                              loopExitNames :: forall f s t a b. HasLens "loopExitNames" f s t a b => LensLike f s t a b Source #

                              maybe'valuesDef :: forall f s t a b. HasLens "maybe'valuesDef" f s t a b => LensLike f s t a b Source #

                              parallelIterations :: forall f s t a b. HasLens "parallelIterations" f s t a b => LensLike f s t a b Source #

                              pivotForBodyName :: forall f s t a b. HasLens "pivotForBodyName" f s t a b => LensLike f s t a b Source #

                              pivotForPredName :: forall f s t a b. HasLens "pivotForPredName" f s t a b => LensLike f s t a b Source #

                              pivotName :: forall f s t a b. HasLens "pivotName" f s t a b => LensLike f s t a b Source #

                              predName :: forall f s t a b. HasLens "predName" f s t a b => LensLike f s t a b Source #

                              swapMemory :: forall f s t a b. HasLens "swapMemory" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              values :: forall f s t a b. HasLens "values" f s t a b => LensLike f s t a b Source #

                              valuesDef :: forall f s t a b. HasLens "valuesDef" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Debug.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Debug.html new file mode 100644 index 0000000..2a25409 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Debug.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.Debug

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.Debug

                              Documentation

                              data DebugTensorWatch Source #

                              Instances

                              Eq DebugTensorWatch Source # 
                              Ord DebugTensorWatch Source # 
                              Show DebugTensorWatch Source # 
                              Message DebugTensorWatch Source # 

                              Methods

                              descriptor :: MessageDescriptor DebugTensorWatch

                              Default DebugTensorWatch Source # 
                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "debugOps" f DebugTensorWatch DebugTensorWatch a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "debugOps" -> (a -> f b) -> DebugTensorWatch -> f DebugTensorWatch

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "debugUrls" f DebugTensorWatch DebugTensorWatch a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "debugUrls" -> (a -> f b) -> DebugTensorWatch -> f DebugTensorWatch

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "nodeName" f DebugTensorWatch DebugTensorWatch a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "nodeName" -> (a -> f b) -> DebugTensorWatch -> f DebugTensorWatch

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "outputSlot" f DebugTensorWatch DebugTensorWatch a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "outputSlot" -> (a -> f b) -> DebugTensorWatch -> f DebugTensorWatch

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "tolerateDebugOpCreationFailures" f DebugTensorWatch DebugTensorWatch a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tolerateDebugOpCreationFailures" -> (a -> f b) -> DebugTensorWatch -> f DebugTensorWatch

                              debugOps :: forall f s t a b. HasLens "debugOps" f s t a b => LensLike f s t a b Source #

                              debugTensorWatchOpts :: forall f s t a b. HasLens "debugTensorWatchOpts" f s t a b => LensLike f s t a b Source #

                              debugUrls :: forall f s t a b. HasLens "debugUrls" f s t a b => LensLike f s t a b Source #

                              globalStep :: forall f s t a b. HasLens "globalStep" f s t a b => LensLike f s t a b Source #

                              nodeName :: forall f s t a b. HasLens "nodeName" f s t a b => LensLike f s t a b Source #

                              outputSlot :: forall f s t a b. HasLens "outputSlot" f s t a b => LensLike f s t a b Source #

                              tolerateDebugOpCreationFailures :: forall f s t a b. HasLens "tolerateDebugOpCreationFailures" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-MetaGraph.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-MetaGraph.html new file mode 100644 index 0000000..f5ad38e --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-MetaGraph.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.MetaGraph

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.MetaGraph

                              Documentation

                              data AssetFileDef Source #

                              Instances

                              Eq AssetFileDef Source # 
                              Ord AssetFileDef Source # 
                              Show AssetFileDef Source # 
                              Message AssetFileDef Source # 

                              Methods

                              descriptor :: MessageDescriptor AssetFileDef

                              Default AssetFileDef Source # 

                              Methods

                              def :: AssetFileDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "filename" f AssetFileDef AssetFileDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "filename" -> (a -> f b) -> AssetFileDef -> f AssetFileDef

                              ((~) * a (Maybe TensorInfo), (~) * b (Maybe TensorInfo), Functor f) => HasLens "maybe'tensorInfo" f AssetFileDef AssetFileDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensorInfo" -> (a -> f b) -> AssetFileDef -> f AssetFileDef

                              ((~) * a TensorInfo, (~) * b TensorInfo, Functor f) => HasLens "tensorInfo" f AssetFileDef AssetFileDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensorInfo" -> (a -> f b) -> AssetFileDef -> f AssetFileDef

                              data CollectionDef Source #

                              Instances

                              Eq CollectionDef Source # 
                              Ord CollectionDef Source # 
                              Show CollectionDef Source # 
                              Message CollectionDef Source # 

                              Methods

                              descriptor :: MessageDescriptor CollectionDef

                              Default CollectionDef Source # 

                              Methods

                              def :: CollectionDef

                              ((~) * a CollectionDef'AnyList, (~) * b CollectionDef'AnyList, Functor f) => HasLens "anyList" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "anyList" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a CollectionDef'BytesList, (~) * b CollectionDef'BytesList, Functor f) => HasLens "bytesList" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "bytesList" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a CollectionDef'FloatList, (~) * b CollectionDef'FloatList, Functor f) => HasLens "floatList" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "floatList" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a CollectionDef'Int64List, (~) * b CollectionDef'Int64List, Functor f) => HasLens "int64List" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "int64List" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a (Maybe CollectionDef'AnyList), (~) * b (Maybe CollectionDef'AnyList), Functor f) => HasLens "maybe'anyList" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'anyList" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a (Maybe CollectionDef'BytesList), (~) * b (Maybe CollectionDef'BytesList), Functor f) => HasLens "maybe'bytesList" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'bytesList" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a (Maybe CollectionDef'FloatList), (~) * b (Maybe CollectionDef'FloatList), Functor f) => HasLens "maybe'floatList" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'floatList" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a (Maybe CollectionDef'Int64List), (~) * b (Maybe CollectionDef'Int64List), Functor f) => HasLens "maybe'int64List" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'int64List" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a (Maybe CollectionDef'Kind), (~) * b (Maybe CollectionDef'Kind), Functor f) => HasLens "maybe'kind" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'kind" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a (Maybe CollectionDef'NodeList), (~) * b (Maybe CollectionDef'NodeList), Functor f) => HasLens "maybe'nodeList" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'nodeList" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              ((~) * a CollectionDef'NodeList, (~) * b CollectionDef'NodeList, Functor f) => HasLens "nodeList" f CollectionDef CollectionDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "nodeList" -> (a -> f b) -> CollectionDef -> f CollectionDef

                              data CollectionDef'BytesList Source #

                              data MetaGraphDef Source #

                              Instances

                              Eq MetaGraphDef Source # 
                              Ord MetaGraphDef Source # 
                              Show MetaGraphDef Source # 
                              Message MetaGraphDef Source # 

                              Methods

                              descriptor :: MessageDescriptor MetaGraphDef

                              Default MetaGraphDef Source # 

                              Methods

                              def :: MetaGraphDef

                              ((~) * a [AssetFileDef], (~) * b [AssetFileDef], Functor f) => HasLens "assetFileDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "assetFileDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              ((~) * a (Map Text CollectionDef), (~) * b (Map Text CollectionDef), Functor f) => HasLens "collectionDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "collectionDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              ((~) * a GraphDef, (~) * b GraphDef, Functor f) => HasLens "graphDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "graphDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              ((~) * a (Maybe GraphDef), (~) * b (Maybe GraphDef), Functor f) => HasLens "maybe'graphDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'graphDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              ((~) * a (Maybe MetaGraphDef'MetaInfoDef), (~) * b (Maybe MetaGraphDef'MetaInfoDef), Functor f) => HasLens "maybe'metaInfoDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'metaInfoDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              ((~) * a (Maybe SaverDef), (~) * b (Maybe SaverDef), Functor f) => HasLens "maybe'saverDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'saverDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              ((~) * a MetaGraphDef'MetaInfoDef, (~) * b MetaGraphDef'MetaInfoDef, Functor f) => HasLens "metaInfoDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "metaInfoDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              ((~) * a SaverDef, (~) * b SaverDef, Functor f) => HasLens "saverDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "saverDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              ((~) * a (Map Text SignatureDef), (~) * b (Map Text SignatureDef), Functor f) => HasLens "signatureDef" f MetaGraphDef MetaGraphDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "signatureDef" -> (a -> f b) -> MetaGraphDef -> f MetaGraphDef

                              data MetaGraphDef'CollectionDefEntry Source #

                              Instances

                              Eq MetaGraphDef'CollectionDefEntry Source # 
                              Ord MetaGraphDef'CollectionDefEntry Source # 
                              Show MetaGraphDef'CollectionDefEntry Source # 
                              Message MetaGraphDef'CollectionDefEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor MetaGraphDef'CollectionDefEntry

                              Default MetaGraphDef'CollectionDefEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f MetaGraphDef'CollectionDefEntry MetaGraphDef'CollectionDefEntry a b Source # 
                              ((~) * a (Maybe CollectionDef), (~) * b (Maybe CollectionDef), Functor f) => HasLens "maybe'value" f MetaGraphDef'CollectionDefEntry MetaGraphDef'CollectionDefEntry a b Source # 
                              ((~) * a CollectionDef, (~) * b CollectionDef, Functor f) => HasLens "value" f MetaGraphDef'CollectionDefEntry MetaGraphDef'CollectionDefEntry a b Source # 

                              data MetaGraphDef'MetaInfoDef Source #

                              Instances

                              Eq MetaGraphDef'MetaInfoDef Source # 
                              Ord MetaGraphDef'MetaInfoDef Source # 
                              Show MetaGraphDef'MetaInfoDef Source # 
                              Message MetaGraphDef'MetaInfoDef Source # 

                              Methods

                              descriptor :: MessageDescriptor MetaGraphDef'MetaInfoDef

                              Default MetaGraphDef'MetaInfoDef Source # 
                              ((~) * a Any, (~) * b Any, Functor f) => HasLens "anyInfo" f MetaGraphDef'MetaInfoDef MetaGraphDef'MetaInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "anyInfo" -> (a -> f b) -> MetaGraphDef'MetaInfoDef -> f MetaGraphDef'MetaInfoDef

                              ((~) * a (Maybe Any), (~) * b (Maybe Any), Functor f) => HasLens "maybe'anyInfo" f MetaGraphDef'MetaInfoDef MetaGraphDef'MetaInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'anyInfo" -> (a -> f b) -> MetaGraphDef'MetaInfoDef -> f MetaGraphDef'MetaInfoDef

                              ((~) * a (Maybe OpList), (~) * b (Maybe OpList), Functor f) => HasLens "maybe'strippedOpList" f MetaGraphDef'MetaInfoDef MetaGraphDef'MetaInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'strippedOpList" -> (a -> f b) -> MetaGraphDef'MetaInfoDef -> f MetaGraphDef'MetaInfoDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "metaGraphVersion" f MetaGraphDef'MetaInfoDef MetaGraphDef'MetaInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "metaGraphVersion" -> (a -> f b) -> MetaGraphDef'MetaInfoDef -> f MetaGraphDef'MetaInfoDef

                              ((~) * a OpList, (~) * b OpList, Functor f) => HasLens "strippedOpList" f MetaGraphDef'MetaInfoDef MetaGraphDef'MetaInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "strippedOpList" -> (a -> f b) -> MetaGraphDef'MetaInfoDef -> f MetaGraphDef'MetaInfoDef

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "tags" f MetaGraphDef'MetaInfoDef MetaGraphDef'MetaInfoDef a b Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "tensorflowGitVersion" f MetaGraphDef'MetaInfoDef MetaGraphDef'MetaInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensorflowGitVersion" -> (a -> f b) -> MetaGraphDef'MetaInfoDef -> f MetaGraphDef'MetaInfoDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "tensorflowVersion" f MetaGraphDef'MetaInfoDef MetaGraphDef'MetaInfoDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensorflowVersion" -> (a -> f b) -> MetaGraphDef'MetaInfoDef -> f MetaGraphDef'MetaInfoDef

                              data MetaGraphDef'SignatureDefEntry Source #

                              Instances

                              Eq MetaGraphDef'SignatureDefEntry Source # 
                              Ord MetaGraphDef'SignatureDefEntry Source # 
                              Show MetaGraphDef'SignatureDefEntry Source # 
                              Message MetaGraphDef'SignatureDefEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor MetaGraphDef'SignatureDefEntry

                              Default MetaGraphDef'SignatureDefEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f MetaGraphDef'SignatureDefEntry MetaGraphDef'SignatureDefEntry a b Source # 
                              ((~) * a (Maybe SignatureDef), (~) * b (Maybe SignatureDef), Functor f) => HasLens "maybe'value" f MetaGraphDef'SignatureDefEntry MetaGraphDef'SignatureDefEntry a b Source # 
                              ((~) * a SignatureDef, (~) * b SignatureDef, Functor f) => HasLens "value" f MetaGraphDef'SignatureDefEntry MetaGraphDef'SignatureDefEntry a b Source # 

                              data SignatureDef Source #

                              Instances

                              Eq SignatureDef Source # 
                              Ord SignatureDef Source # 
                              Show SignatureDef Source # 
                              Message SignatureDef Source # 

                              Methods

                              descriptor :: MessageDescriptor SignatureDef

                              Default SignatureDef Source # 

                              Methods

                              def :: SignatureDef

                              ((~) * a (Map Text TensorInfo), (~) * b (Map Text TensorInfo), Functor f) => HasLens "inputs" f SignatureDef SignatureDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "inputs" -> (a -> f b) -> SignatureDef -> f SignatureDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "methodName" f SignatureDef SignatureDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "methodName" -> (a -> f b) -> SignatureDef -> f SignatureDef

                              ((~) * a (Map Text TensorInfo), (~) * b (Map Text TensorInfo), Functor f) => HasLens "outputs" f SignatureDef SignatureDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "outputs" -> (a -> f b) -> SignatureDef -> f SignatureDef

                              data SignatureDef'InputsEntry Source #

                              Instances

                              Eq SignatureDef'InputsEntry Source # 
                              Ord SignatureDef'InputsEntry Source # 
                              Show SignatureDef'InputsEntry Source # 
                              Message SignatureDef'InputsEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor SignatureDef'InputsEntry

                              Default SignatureDef'InputsEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f SignatureDef'InputsEntry SignatureDef'InputsEntry a b Source # 
                              ((~) * a (Maybe TensorInfo), (~) * b (Maybe TensorInfo), Functor f) => HasLens "maybe'value" f SignatureDef'InputsEntry SignatureDef'InputsEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> SignatureDef'InputsEntry -> f SignatureDef'InputsEntry

                              ((~) * a TensorInfo, (~) * b TensorInfo, Functor f) => HasLens "value" f SignatureDef'InputsEntry SignatureDef'InputsEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> SignatureDef'InputsEntry -> f SignatureDef'InputsEntry

                              data SignatureDef'OutputsEntry Source #

                              Instances

                              Eq SignatureDef'OutputsEntry Source # 
                              Ord SignatureDef'OutputsEntry Source # 
                              Show SignatureDef'OutputsEntry Source # 
                              Message SignatureDef'OutputsEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor SignatureDef'OutputsEntry

                              Default SignatureDef'OutputsEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f SignatureDef'OutputsEntry SignatureDef'OutputsEntry a b Source # 
                              ((~) * a (Maybe TensorInfo), (~) * b (Maybe TensorInfo), Functor f) => HasLens "maybe'value" f SignatureDef'OutputsEntry SignatureDef'OutputsEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> SignatureDef'OutputsEntry -> f SignatureDef'OutputsEntry

                              ((~) * a TensorInfo, (~) * b TensorInfo, Functor f) => HasLens "value" f SignatureDef'OutputsEntry SignatureDef'OutputsEntry a b Source # 

                              data TensorInfo Source #

                              Instances

                              Eq TensorInfo Source # 
                              Ord TensorInfo Source # 
                              Show TensorInfo Source # 
                              Message TensorInfo Source # 

                              Methods

                              descriptor :: MessageDescriptor TensorInfo

                              Default TensorInfo Source # 

                              Methods

                              def :: TensorInfo

                              ((~) * a TensorInfo'CooSparse, (~) * b TensorInfo'CooSparse, Functor f) => HasLens "cooSparse" f TensorInfo TensorInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "cooSparse" -> (a -> f b) -> TensorInfo -> f TensorInfo

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "dtype" f TensorInfo TensorInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "dtype" -> (a -> f b) -> TensorInfo -> f TensorInfo

                              ((~) * a (Maybe TensorInfo'CooSparse), (~) * b (Maybe TensorInfo'CooSparse), Functor f) => HasLens "maybe'cooSparse" f TensorInfo TensorInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'cooSparse" -> (a -> f b) -> TensorInfo -> f TensorInfo

                              ((~) * a (Maybe TensorInfo'Encoding), (~) * b (Maybe TensorInfo'Encoding), Functor f) => HasLens "maybe'encoding" f TensorInfo TensorInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'encoding" -> (a -> f b) -> TensorInfo -> f TensorInfo

                              ((~) * a (Maybe Text), (~) * b (Maybe Text), Functor f) => HasLens "maybe'name" f TensorInfo TensorInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'name" -> (a -> f b) -> TensorInfo -> f TensorInfo

                              ((~) * a (Maybe TensorShapeProto), (~) * b (Maybe TensorShapeProto), Functor f) => HasLens "maybe'tensorShape" f TensorInfo TensorInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensorShape" -> (a -> f b) -> TensorInfo -> f TensorInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f TensorInfo TensorInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> TensorInfo -> f TensorInfo

                              ((~) * a TensorShapeProto, (~) * b TensorShapeProto, Functor f) => HasLens "tensorShape" f TensorInfo TensorInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensorShape" -> (a -> f b) -> TensorInfo -> f TensorInfo

                              data TensorInfo'CooSparse Source #

                              Instances

                              Eq TensorInfo'CooSparse Source # 
                              Ord TensorInfo'CooSparse Source # 
                              Show TensorInfo'CooSparse Source # 
                              Message TensorInfo'CooSparse Source # 

                              Methods

                              descriptor :: MessageDescriptor TensorInfo'CooSparse

                              Default TensorInfo'CooSparse Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "denseShapeTensorName" f TensorInfo'CooSparse TensorInfo'CooSparse a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "denseShapeTensorName" -> (a -> f b) -> TensorInfo'CooSparse -> f TensorInfo'CooSparse

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "indicesTensorName" f TensorInfo'CooSparse TensorInfo'CooSparse a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "indicesTensorName" -> (a -> f b) -> TensorInfo'CooSparse -> f TensorInfo'CooSparse

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "valuesTensorName" f TensorInfo'CooSparse TensorInfo'CooSparse a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "valuesTensorName" -> (a -> f b) -> TensorInfo'CooSparse -> f TensorInfo'CooSparse

                              anyInfo :: forall f s t a b. HasLens "anyInfo" f s t a b => LensLike f s t a b Source #

                              anyList :: forall f s t a b. HasLens "anyList" f s t a b => LensLike f s t a b Source #

                              assetFileDef :: forall f s t a b. HasLens "assetFileDef" f s t a b => LensLike f s t a b Source #

                              bytesList :: forall f s t a b. HasLens "bytesList" f s t a b => LensLike f s t a b Source #

                              collectionDef :: forall f s t a b. HasLens "collectionDef" f s t a b => LensLike f s t a b Source #

                              cooSparse :: forall f s t a b. HasLens "cooSparse" f s t a b => LensLike f s t a b Source #

                              denseShapeTensorName :: forall f s t a b. HasLens "denseShapeTensorName" f s t a b => LensLike f s t a b Source #

                              dtype :: forall f s t a b. HasLens "dtype" f s t a b => LensLike f s t a b Source #

                              filename :: forall f s t a b. HasLens "filename" f s t a b => LensLike f s t a b Source #

                              floatList :: forall f s t a b. HasLens "floatList" f s t a b => LensLike f s t a b Source #

                              graphDef :: forall f s t a b. HasLens "graphDef" f s t a b => LensLike f s t a b Source #

                              indicesTensorName :: forall f s t a b. HasLens "indicesTensorName" f s t a b => LensLike f s t a b Source #

                              inputs :: forall f s t a b. HasLens "inputs" f s t a b => LensLike f s t a b Source #

                              int64List :: forall f s t a b. HasLens "int64List" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              maybe'anyInfo :: forall f s t a b. HasLens "maybe'anyInfo" f s t a b => LensLike f s t a b Source #

                              maybe'anyList :: forall f s t a b. HasLens "maybe'anyList" f s t a b => LensLike f s t a b Source #

                              maybe'bytesList :: forall f s t a b. HasLens "maybe'bytesList" f s t a b => LensLike f s t a b Source #

                              maybe'cooSparse :: forall f s t a b. HasLens "maybe'cooSparse" f s t a b => LensLike f s t a b Source #

                              maybe'encoding :: forall f s t a b. HasLens "maybe'encoding" f s t a b => LensLike f s t a b Source #

                              maybe'floatList :: forall f s t a b. HasLens "maybe'floatList" f s t a b => LensLike f s t a b Source #

                              maybe'graphDef :: forall f s t a b. HasLens "maybe'graphDef" f s t a b => LensLike f s t a b Source #

                              maybe'int64List :: forall f s t a b. HasLens "maybe'int64List" f s t a b => LensLike f s t a b Source #

                              maybe'kind :: forall f s t a b. HasLens "maybe'kind" f s t a b => LensLike f s t a b Source #

                              maybe'metaInfoDef :: forall f s t a b. HasLens "maybe'metaInfoDef" f s t a b => LensLike f s t a b Source #

                              maybe'name :: forall f s t a b. HasLens "maybe'name" f s t a b => LensLike f s t a b Source #

                              maybe'nodeList :: forall f s t a b. HasLens "maybe'nodeList" f s t a b => LensLike f s t a b Source #

                              maybe'saverDef :: forall f s t a b. HasLens "maybe'saverDef" f s t a b => LensLike f s t a b Source #

                              maybe'strippedOpList :: forall f s t a b. HasLens "maybe'strippedOpList" f s t a b => LensLike f s t a b Source #

                              maybe'tensorInfo :: forall f s t a b. HasLens "maybe'tensorInfo" f s t a b => LensLike f s t a b Source #

                              maybe'tensorShape :: forall f s t a b. HasLens "maybe'tensorShape" f s t a b => LensLike f s t a b Source #

                              maybe'value :: forall f s t a b. HasLens "maybe'value" f s t a b => LensLike f s t a b Source #

                              metaGraphVersion :: forall f s t a b. HasLens "metaGraphVersion" f s t a b => LensLike f s t a b Source #

                              metaInfoDef :: forall f s t a b. HasLens "metaInfoDef" f s t a b => LensLike f s t a b Source #

                              methodName :: forall f s t a b. HasLens "methodName" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              nodeList :: forall f s t a b. HasLens "nodeList" f s t a b => LensLike f s t a b Source #

                              outputs :: forall f s t a b. HasLens "outputs" f s t a b => LensLike f s t a b Source #

                              saverDef :: forall f s t a b. HasLens "saverDef" f s t a b => LensLike f s t a b Source #

                              signatureDef :: forall f s t a b. HasLens "signatureDef" f s t a b => LensLike f s t a b Source #

                              strippedOpList :: forall f s t a b. HasLens "strippedOpList" f s t a b => LensLike f s t a b Source #

                              tags :: forall f s t a b. HasLens "tags" f s t a b => LensLike f s t a b Source #

                              tensorInfo :: forall f s t a b. HasLens "tensorInfo" f s t a b => LensLike f s t a b Source #

                              tensorShape :: forall f s t a b. HasLens "tensorShape" f s t a b => LensLike f s t a b Source #

                              tensorflowGitVersion :: forall f s t a b. HasLens "tensorflowGitVersion" f s t a b => LensLike f s t a b Source #

                              tensorflowVersion :: forall f s t a b. HasLens "tensorflowVersion" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              valuesTensorName :: forall f s t a b. HasLens "valuesTensorName" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-NamedTensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-NamedTensor.html new file mode 100644 index 0000000..db93869 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-NamedTensor.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.NamedTensor

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.NamedTensor

                              Documentation

                              data NamedTensorProto Source #

                              Instances

                              Eq NamedTensorProto Source # 
                              Ord NamedTensorProto Source # 
                              Show NamedTensorProto Source # 
                              Message NamedTensorProto Source # 

                              Methods

                              descriptor :: MessageDescriptor NamedTensorProto

                              Default NamedTensorProto Source # 
                              ((~) * a (Maybe TensorProto), (~) * b (Maybe TensorProto), Functor f) => HasLens "maybe'tensor" f NamedTensorProto NamedTensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'tensor" -> (a -> f b) -> NamedTensorProto -> f NamedTensorProto

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f NamedTensorProto NamedTensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> NamedTensorProto -> f NamedTensorProto

                              ((~) * a TensorProto, (~) * b TensorProto, Functor f) => HasLens "tensor" f NamedTensorProto NamedTensorProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensor" -> (a -> f b) -> NamedTensorProto -> f NamedTensorProto

                              maybe'tensor :: forall f s t a b. HasLens "maybe'tensor" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              tensor :: forall f s t a b. HasLens "tensor" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-QueueRunner.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-QueueRunner.html new file mode 100644 index 0000000..f9339cd --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-QueueRunner.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.QueueRunner

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.QueueRunner

                              Documentation

                              data QueueRunnerDef Source #

                              Instances

                              Eq QueueRunnerDef Source # 
                              Ord QueueRunnerDef Source # 
                              Show QueueRunnerDef Source # 
                              Message QueueRunnerDef Source # 

                              Methods

                              descriptor :: MessageDescriptor QueueRunnerDef

                              Default QueueRunnerDef Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "cancelOpName" f QueueRunnerDef QueueRunnerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "cancelOpName" -> (a -> f b) -> QueueRunnerDef -> f QueueRunnerDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "closeOpName" f QueueRunnerDef QueueRunnerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "closeOpName" -> (a -> f b) -> QueueRunnerDef -> f QueueRunnerDef

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "enqueueOpName" f QueueRunnerDef QueueRunnerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "enqueueOpName" -> (a -> f b) -> QueueRunnerDef -> f QueueRunnerDef

                              ((~) * a [Code], (~) * b [Code], Functor f) => HasLens "queueClosedExceptionTypes" f QueueRunnerDef QueueRunnerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "queueClosedExceptionTypes" -> (a -> f b) -> QueueRunnerDef -> f QueueRunnerDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "queueName" f QueueRunnerDef QueueRunnerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "queueName" -> (a -> f b) -> QueueRunnerDef -> f QueueRunnerDef

                              cancelOpName :: forall f s t a b. HasLens "cancelOpName" f s t a b => LensLike f s t a b Source #

                              closeOpName :: forall f s t a b. HasLens "closeOpName" f s t a b => LensLike f s t a b Source #

                              enqueueOpName :: forall f s t a b. HasLens "enqueueOpName" f s t a b => LensLike f s t a b Source #

                              queueClosedExceptionTypes :: forall f s t a b. HasLens "queueClosedExceptionTypes" f s t a b => LensLike f s t a b Source #

                              queueName :: forall f s t a b. HasLens "queueName" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-RewriterConfig.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-RewriterConfig.html new file mode 100644 index 0000000..83483bf --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-RewriterConfig.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.RewriterConfig

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.RewriterConfig

                              Documentation

                              data AutoParallelOptions Source #

                              data RewriterConfig Source #

                              Instances

                              Eq RewriterConfig Source # 
                              Ord RewriterConfig Source # 
                              Show RewriterConfig Source # 
                              Message RewriterConfig Source # 

                              Methods

                              descriptor :: MessageDescriptor RewriterConfig

                              Default RewriterConfig Source # 
                              ((~) * a AutoParallelOptions, (~) * b AutoParallelOptions, Functor f) => HasLens "autoParallel" f RewriterConfig RewriterConfig a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "autoParallel" -> (a -> f b) -> RewriterConfig -> f RewriterConfig

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "constantFolding" f RewriterConfig RewriterConfig a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "constantFolding" -> (a -> f b) -> RewriterConfig -> f RewriterConfig

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "disableModelPruning" f RewriterConfig RewriterConfig a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "disableModelPruning" -> (a -> f b) -> RewriterConfig -> f RewriterConfig

                              ((~) * a (Maybe AutoParallelOptions), (~) * b (Maybe AutoParallelOptions), Functor f) => HasLens "maybe'autoParallel" f RewriterConfig RewriterConfig a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'autoParallel" -> (a -> f b) -> RewriterConfig -> f RewriterConfig

                              ((~) * a RewriterConfig'MemOptType, (~) * b RewriterConfig'MemOptType, Functor f) => HasLens "memoryOptimization" f RewriterConfig RewriterConfig a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "memoryOptimization" -> (a -> f b) -> RewriterConfig -> f RewriterConfig

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "optimizeTensorLayout" f RewriterConfig RewriterConfig a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "optimizeTensorLayout" -> (a -> f b) -> RewriterConfig -> f RewriterConfig

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "optimizers" f RewriterConfig RewriterConfig a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "optimizers" -> (a -> f b) -> RewriterConfig -> f RewriterConfig

                              data RewriterConfig'MemOptType Source #

                              Instances

                              Bounded RewriterConfig'MemOptType Source # 
                              Enum RewriterConfig'MemOptType Source # 
                              Eq RewriterConfig'MemOptType Source # 
                              Ord RewriterConfig'MemOptType Source # 
                              Show RewriterConfig'MemOptType Source # 
                              MessageEnum RewriterConfig'MemOptType Source # 
                              FieldDefault RewriterConfig'MemOptType Source # 
                              Default RewriterConfig'MemOptType Source # 

                              autoParallel :: forall f s t a b. HasLens "autoParallel" f s t a b => LensLike f s t a b Source #

                              constantFolding :: forall f s t a b. HasLens "constantFolding" f s t a b => LensLike f s t a b Source #

                              disableModelPruning :: forall f s t a b. HasLens "disableModelPruning" f s t a b => LensLike f s t a b Source #

                              enable :: forall f s t a b. HasLens "enable" f s t a b => LensLike f s t a b Source #

                              maybe'autoParallel :: forall f s t a b. HasLens "maybe'autoParallel" f s t a b => LensLike f s t a b Source #

                              memoryOptimization :: forall f s t a b. HasLens "memoryOptimization" f s t a b => LensLike f s t a b Source #

                              numReplicas :: forall f s t a b. HasLens "numReplicas" f s t a b => LensLike f s t a b Source #

                              optimizeTensorLayout :: forall f s t a b. HasLens "optimizeTensorLayout" f s t a b => LensLike f s t a b Source #

                              optimizers :: forall f s t a b. HasLens "optimizers" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-SavedModel.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-SavedModel.html new file mode 100644 index 0000000..64e9e71 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-SavedModel.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.SavedModel

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.SavedModel

                              Documentation

                              data SavedModel Source #

                              Instances

                              Eq SavedModel Source # 
                              Ord SavedModel Source # 
                              Show SavedModel Source # 
                              Message SavedModel Source # 

                              Methods

                              descriptor :: MessageDescriptor SavedModel

                              Default SavedModel Source # 

                              Methods

                              def :: SavedModel

                              ((~) * a [MetaGraphDef], (~) * b [MetaGraphDef], Functor f) => HasLens "metaGraphs" f SavedModel SavedModel a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "metaGraphs" -> (a -> f b) -> SavedModel -> f SavedModel

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "savedModelSchemaVersion" f SavedModel SavedModel a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "savedModelSchemaVersion" -> (a -> f b) -> SavedModel -> f SavedModel

                              metaGraphs :: forall f s t a b. HasLens "metaGraphs" f s t a b => LensLike f s t a b Source #

                              savedModelSchemaVersion :: forall f s t a b. HasLens "savedModelSchemaVersion" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Saver.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Saver.html new file mode 100644 index 0000000..33c2b34 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Saver.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.Saver

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.Saver

                              Documentation

                              data SaverDef Source #

                              Instances

                              Eq SaverDef Source # 
                              Ord SaverDef Source # 
                              Show SaverDef Source # 
                              Message SaverDef Source # 

                              Methods

                              descriptor :: MessageDescriptor SaverDef

                              Default SaverDef Source # 

                              Methods

                              def :: SaverDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "filenameTensorName" f SaverDef SaverDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "filenameTensorName" -> (a -> f b) -> SaverDef -> f SaverDef

                              ((~) * a Float, (~) * b Float, Functor f) => HasLens "keepCheckpointEveryNHours" f SaverDef SaverDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "keepCheckpointEveryNHours" -> (a -> f b) -> SaverDef -> f SaverDef

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "maxToKeep" f SaverDef SaverDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maxToKeep" -> (a -> f b) -> SaverDef -> f SaverDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "restoreOpName" f SaverDef SaverDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "restoreOpName" -> (a -> f b) -> SaverDef -> f SaverDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "saveTensorName" f SaverDef SaverDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "saveTensorName" -> (a -> f b) -> SaverDef -> f SaverDef

                              ((~) * a Bool, (~) * b Bool, Functor f) => HasLens "sharded" f SaverDef SaverDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "sharded" -> (a -> f b) -> SaverDef -> f SaverDef

                              ((~) * a SaverDef'CheckpointFormatVersion, (~) * b SaverDef'CheckpointFormatVersion, Functor f) => HasLens "version" f SaverDef SaverDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "version" -> (a -> f b) -> SaverDef -> f SaverDef

                              data SaverDef'CheckpointFormatVersion Source #

                              Instances

                              Bounded SaverDef'CheckpointFormatVersion Source # 
                              Enum SaverDef'CheckpointFormatVersion Source # 
                              Eq SaverDef'CheckpointFormatVersion Source # 
                              Ord SaverDef'CheckpointFormatVersion Source # 
                              Show SaverDef'CheckpointFormatVersion Source # 
                              MessageEnum SaverDef'CheckpointFormatVersion Source # 
                              FieldDefault SaverDef'CheckpointFormatVersion Source # 
                              Default SaverDef'CheckpointFormatVersion Source # 

                              filenameTensorName :: forall f s t a b. HasLens "filenameTensorName" f s t a b => LensLike f s t a b Source #

                              keepCheckpointEveryNHours :: forall f s t a b. HasLens "keepCheckpointEveryNHours" f s t a b => LensLike f s t a b Source #

                              maxToKeep :: forall f s t a b. HasLens "maxToKeep" f s t a b => LensLike f s t a b Source #

                              restoreOpName :: forall f s t a b. HasLens "restoreOpName" f s t a b => LensLike f s t a b Source #

                              saveTensorName :: forall f s t a b. HasLens "saveTensorName" f s t a b => LensLike f s t a b Source #

                              sharded :: forall f s t a b. HasLens "sharded" f s t a b => LensLike f s t a b Source #

                              version :: forall f s t a b. HasLens "version" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-TensorBundle.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-TensorBundle.html new file mode 100644 index 0000000..6f09808 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-TensorBundle.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.TensorBundle

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.TensorBundle

                              Documentation

                              data BundleEntryProto Source #

                              Instances

                              Eq BundleEntryProto Source # 
                              Ord BundleEntryProto Source # 
                              Show BundleEntryProto Source # 
                              Message BundleEntryProto Source # 

                              Methods

                              descriptor :: MessageDescriptor BundleEntryProto

                              Default BundleEntryProto Source # 
                              ((~) * a Word32, (~) * b Word32, Functor f) => HasLens "crc32c" f BundleEntryProto BundleEntryProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "crc32c" -> (a -> f b) -> BundleEntryProto -> f BundleEntryProto

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "dtype" f BundleEntryProto BundleEntryProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "dtype" -> (a -> f b) -> BundleEntryProto -> f BundleEntryProto

                              ((~) * a (Maybe TensorShapeProto), (~) * b (Maybe TensorShapeProto), Functor f) => HasLens "maybe'shape" f BundleEntryProto BundleEntryProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'shape" -> (a -> f b) -> BundleEntryProto -> f BundleEntryProto

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "offset" f BundleEntryProto BundleEntryProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "offset" -> (a -> f b) -> BundleEntryProto -> f BundleEntryProto

                              ((~) * a TensorShapeProto, (~) * b TensorShapeProto, Functor f) => HasLens "shape" f BundleEntryProto BundleEntryProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "shape" -> (a -> f b) -> BundleEntryProto -> f BundleEntryProto

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "shardId" f BundleEntryProto BundleEntryProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "shardId" -> (a -> f b) -> BundleEntryProto -> f BundleEntryProto

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "size" f BundleEntryProto BundleEntryProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "size" -> (a -> f b) -> BundleEntryProto -> f BundleEntryProto

                              ((~) * a [TensorSliceProto], (~) * b [TensorSliceProto], Functor f) => HasLens "slices" f BundleEntryProto BundleEntryProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "slices" -> (a -> f b) -> BundleEntryProto -> f BundleEntryProto

                              data BundleHeaderProto Source #

                              Instances

                              Eq BundleHeaderProto Source # 
                              Ord BundleHeaderProto Source # 
                              Show BundleHeaderProto Source # 
                              Message BundleHeaderProto Source # 

                              Methods

                              descriptor :: MessageDescriptor BundleHeaderProto

                              Default BundleHeaderProto Source # 
                              ((~) * a BundleHeaderProto'Endianness, (~) * b BundleHeaderProto'Endianness, Functor f) => HasLens "endianness" f BundleHeaderProto BundleHeaderProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "endianness" -> (a -> f b) -> BundleHeaderProto -> f BundleHeaderProto

                              ((~) * a (Maybe VersionDef), (~) * b (Maybe VersionDef), Functor f) => HasLens "maybe'version" f BundleHeaderProto BundleHeaderProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'version" -> (a -> f b) -> BundleHeaderProto -> f BundleHeaderProto

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "numShards" f BundleHeaderProto BundleHeaderProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "numShards" -> (a -> f b) -> BundleHeaderProto -> f BundleHeaderProto

                              ((~) * a VersionDef, (~) * b VersionDef, Functor f) => HasLens "version" f BundleHeaderProto BundleHeaderProto a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "version" -> (a -> f b) -> BundleHeaderProto -> f BundleHeaderProto

                              data BundleHeaderProto'Endianness Source #

                              Instances

                              Bounded BundleHeaderProto'Endianness Source # 
                              Enum BundleHeaderProto'Endianness Source # 
                              Eq BundleHeaderProto'Endianness Source # 
                              Ord BundleHeaderProto'Endianness Source # 
                              Show BundleHeaderProto'Endianness Source # 
                              MessageEnum BundleHeaderProto'Endianness Source # 
                              FieldDefault BundleHeaderProto'Endianness Source # 
                              Default BundleHeaderProto'Endianness Source # 

                              crc32c :: forall f s t a b. HasLens "crc32c" f s t a b => LensLike f s t a b Source #

                              dtype :: forall f s t a b. HasLens "dtype" f s t a b => LensLike f s t a b Source #

                              endianness :: forall f s t a b. HasLens "endianness" f s t a b => LensLike f s t a b Source #

                              maybe'shape :: forall f s t a b. HasLens "maybe'shape" f s t a b => LensLike f s t a b Source #

                              maybe'version :: forall f s t a b. HasLens "maybe'version" f s t a b => LensLike f s t a b Source #

                              numShards :: forall f s t a b. HasLens "numShards" f s t a b => LensLike f s t a b Source #

                              offset :: forall f s t a b. HasLens "offset" f s t a b => LensLike f s t a b Source #

                              shape :: forall f s t a b. HasLens "shape" f s t a b => LensLike f s t a b Source #

                              shardId :: forall f s t a b. HasLens "shardId" f s t a b => LensLike f s t a b Source #

                              size :: forall f s t a b. HasLens "size" f s t a b => LensLike f s t a b Source #

                              slices :: forall f s t a b. HasLens "slices" f s t a b => LensLike f s t a b Source #

                              version :: forall f s t a b. HasLens "version" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-TensorflowServer.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-TensorflowServer.html new file mode 100644 index 0000000..77da447 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-TensorflowServer.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.TensorflowServer

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Protobuf.TensorflowServer

                              Documentation

                              data ServerDef Source #

                              Instances

                              Eq ServerDef Source # 
                              Ord ServerDef Source # 
                              Show ServerDef Source # 
                              Message ServerDef Source # 

                              Methods

                              descriptor :: MessageDescriptor ServerDef

                              Default ServerDef Source # 

                              Methods

                              def :: ServerDef

                              ((~) * a ClusterDef, (~) * b ClusterDef, Functor f) => HasLens "cluster" f ServerDef ServerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "cluster" -> (a -> f b) -> ServerDef -> f ServerDef

                              ((~) * a ConfigProto, (~) * b ConfigProto, Functor f) => HasLens "defaultSessionConfig" f ServerDef ServerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "defaultSessionConfig" -> (a -> f b) -> ServerDef -> f ServerDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "jobName" f ServerDef ServerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "jobName" -> (a -> f b) -> ServerDef -> f ServerDef

                              ((~) * a (Maybe ClusterDef), (~) * b (Maybe ClusterDef), Functor f) => HasLens "maybe'cluster" f ServerDef ServerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'cluster" -> (a -> f b) -> ServerDef -> f ServerDef

                              ((~) * a (Maybe ConfigProto), (~) * b (Maybe ConfigProto), Functor f) => HasLens "maybe'defaultSessionConfig" f ServerDef ServerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'defaultSessionConfig" -> (a -> f b) -> ServerDef -> f ServerDef

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "protocol" f ServerDef ServerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "protocol" -> (a -> f b) -> ServerDef -> f ServerDef

                              ((~) * a Int32, (~) * b Int32, Functor f) => HasLens "taskIndex" f ServerDef ServerDef a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "taskIndex" -> (a -> f b) -> ServerDef -> f ServerDef

                              cluster :: forall f s t a b. HasLens "cluster" f s t a b => LensLike f s t a b Source #

                              defaultSessionConfig :: forall f s t a b. HasLens "defaultSessionConfig" f s t a b => LensLike f s t a b Source #

                              jobName :: forall f s t a b. HasLens "jobName" f s t a b => LensLike f s t a b Source #

                              maybe'cluster :: forall f s t a b. HasLens "maybe'cluster" f s t a b => LensLike f s t a b Source #

                              maybe'defaultSessionConfig :: forall f s t a b. HasLens "maybe'defaultSessionConfig" f s t a b => LensLike f s t a b Source #

                              protocol :: forall f s t a b. HasLens "protocol" f s t a b => LensLike f s t a b Source #

                              taskIndex :: forall f s t a b. HasLens "taskIndex" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-Event.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-Event.html index 123009a..2606dd1 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-Event.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-Event.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Util.Event

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Util.Event

                              Documentation

                              data Event

                              Instances

                              Eq Event 
                              Show Event 
                              Message Event 
                              Default Event 
                              HasField "fileVersion" Event Event 
                              HasField "graphDef" Event Event 
                              HasField "logMessage" Event Event 
                              HasField "maybe'fileVersion" Event Event 
                              HasField "maybe'graphDef" Event Event 
                              HasField "maybe'logMessage" Event Event 
                              HasField "maybe'metaGraphDef" Event Event 
                              HasField "maybe'sessionLog" Event Event 
                              HasField "maybe'summary" Event Event 
                              HasField "maybe'taggedRunMetadata" Event Event 
                              HasField "metaGraphDef" Event Event 
                              HasField "sessionLog" Event Event 
                              HasField "step" Event Event 
                              HasField "summary" Event Event 
                              HasField "taggedRunMetadata" Event Event 
                              HasField "wallTime" Event Event 
                              type Field "fileVersion" Event = Text 
                              type Field "graphDef" Event = ByteString 
                              type Field "logMessage" Event = LogMessage 
                              type Field "maybe'fileVersion" Event = Maybe Text 
                              type Field "maybe'graphDef" Event = Maybe ByteString 
                              type Field "maybe'logMessage" Event = Maybe LogMessage 
                              type Field "maybe'metaGraphDef" Event = Maybe ByteString 
                              type Field "maybe'sessionLog" Event = Maybe SessionLog 
                              type Field "maybe'summary" Event = Maybe Summary 
                              type Field "maybe'taggedRunMetadata" Event = Maybe TaggedRunMetadata 
                              type Field "metaGraphDef" Event = ByteString 
                              type Field "sessionLog" Event = SessionLog 
                              type Field "step" Event = Int64 
                              type Field "summary" Event = Summary 
                              type Field "taggedRunMetadata" Event = TaggedRunMetadata 
                              type Field "wallTime" Event = Double 

                              data LogMessage

                              Instances

                              Eq LogMessage 
                              Show LogMessage 
                              Message LogMessage 
                              Default LogMessage 
                              HasField "level" LogMessage LogMessage 
                              HasField "message" LogMessage LogMessage 
                              type Field "level" LogMessage = LogMessage'Level 
                              type Field "message" LogMessage = Text 

                              data SessionLog

                              Instances

                              Eq SessionLog 
                              Show SessionLog 
                              Message SessionLog 
                              Default SessionLog 
                              HasField "checkpointPath" SessionLog SessionLog 
                              HasField "msg" SessionLog SessionLog 
                              HasField "status" SessionLog SessionLog 
                              type Field "checkpointPath" SessionLog = Text 
                              type Field "msg" SessionLog = Text 
                              type Field "status" SessionLog = SessionLog'SessionStatus 

                              checkpointPath :: forall msg msg'. HasField "checkpointPath" msg msg' => Lens msg msg' (Field "checkpointPath" msg) (Field "checkpointPath" msg')

                              fileVersion :: forall msg msg'. HasField "fileVersion" msg msg' => Lens msg msg' (Field "fileVersion" msg) (Field "fileVersion" msg')

                              graphDef :: forall msg msg'. HasField "graphDef" msg msg' => Lens msg msg' (Field "graphDef" msg) (Field "graphDef" msg')

                              level :: forall msg msg'. HasField "level" msg msg' => Lens msg msg' (Field "level" msg) (Field "level" msg')

                              logMessage :: forall msg msg'. HasField "logMessage" msg msg' => Lens msg msg' (Field "logMessage" msg) (Field "logMessage" msg')

                              maybe'fileVersion :: forall msg msg'. HasField "maybe'fileVersion" msg msg' => Lens msg msg' (Field "maybe'fileVersion" msg) (Field "maybe'fileVersion" msg')

                              maybe'graphDef :: forall msg msg'. HasField "maybe'graphDef" msg msg' => Lens msg msg' (Field "maybe'graphDef" msg) (Field "maybe'graphDef" msg')

                              maybe'logMessage :: forall msg msg'. HasField "maybe'logMessage" msg msg' => Lens msg msg' (Field "maybe'logMessage" msg) (Field "maybe'logMessage" msg')

                              maybe'metaGraphDef :: forall msg msg'. HasField "maybe'metaGraphDef" msg msg' => Lens msg msg' (Field "maybe'metaGraphDef" msg) (Field "maybe'metaGraphDef" msg')

                              maybe'sessionLog :: forall msg msg'. HasField "maybe'sessionLog" msg msg' => Lens msg msg' (Field "maybe'sessionLog" msg) (Field "maybe'sessionLog" msg')

                              maybe'summary :: forall msg msg'. HasField "maybe'summary" msg msg' => Lens msg msg' (Field "maybe'summary" msg) (Field "maybe'summary" msg')

                              maybe'taggedRunMetadata :: forall msg msg'. HasField "maybe'taggedRunMetadata" msg msg' => Lens msg msg' (Field "maybe'taggedRunMetadata" msg) (Field "maybe'taggedRunMetadata" msg')

                              message :: forall msg msg'. HasField "message" msg msg' => Lens msg msg' (Field "message" msg) (Field "message" msg')

                              metaGraphDef :: forall msg msg'. HasField "metaGraphDef" msg msg' => Lens msg msg' (Field "metaGraphDef" msg) (Field "metaGraphDef" msg')

                              msg :: forall msg msg'. HasField "msg" msg msg' => Lens msg msg' (Field "msg" msg) (Field "msg" msg')

                              runMetadata :: forall msg msg'. HasField "runMetadata" msg msg' => Lens msg msg' (Field "runMetadata" msg) (Field "runMetadata" msg')

                              sessionLog :: forall msg msg'. HasField "sessionLog" msg msg' => Lens msg msg' (Field "sessionLog" msg) (Field "sessionLog" msg')

                              status :: forall msg msg'. HasField "status" msg msg' => Lens msg msg' (Field "status" msg) (Field "status" msg')

                              step :: forall msg msg'. HasField "step" msg msg' => Lens msg msg' (Field "step" msg) (Field "step" msg')

                              summary :: forall msg msg'. HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg')

                              tag :: forall msg msg'. HasField "tag" msg msg' => Lens msg msg' (Field "tag" msg) (Field "tag" msg')

                              taggedRunMetadata :: forall msg msg'. HasField "taggedRunMetadata" msg msg' => Lens msg msg' (Field "taggedRunMetadata" msg) (Field "taggedRunMetadata" msg')

                              wallTime :: forall msg msg'. HasField "wallTime" msg msg' => Lens msg msg' (Field "wallTime" msg) (Field "wallTime" msg')

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Util.Event

                              Documentation

                              data Event Source #

                              Instances

                              Eq Event Source # 

                              Methods

                              (==) :: Event -> Event -> Bool #

                              (/=) :: Event -> Event -> Bool #

                              Ord Event Source # 

                              Methods

                              compare :: Event -> Event -> Ordering #

                              (<) :: Event -> Event -> Bool #

                              (<=) :: Event -> Event -> Bool #

                              (>) :: Event -> Event -> Bool #

                              (>=) :: Event -> Event -> Bool #

                              max :: Event -> Event -> Event #

                              min :: Event -> Event -> Event #

                              Show Event Source # 

                              Methods

                              showsPrec :: Int -> Event -> ShowS #

                              show :: Event -> String #

                              showList :: [Event] -> ShowS #

                              Message Event Source # 

                              Methods

                              descriptor :: MessageDescriptor Event

                              Default Event Source # 

                              Methods

                              def :: Event

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "fileVersion" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "fileVersion" -> (a -> f b) -> Event -> f Event

                              ((~) * a ByteString, (~) * b ByteString, Functor f) => HasLens "graphDef" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "graphDef" -> (a -> f b) -> Event -> f Event

                              ((~) * a LogMessage, (~) * b LogMessage, Functor f) => HasLens "logMessage" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "logMessage" -> (a -> f b) -> Event -> f Event

                              ((~) * a (Maybe Text), (~) * b (Maybe Text), Functor f) => HasLens "maybe'fileVersion" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'fileVersion" -> (a -> f b) -> Event -> f Event

                              ((~) * a (Maybe ByteString), (~) * b (Maybe ByteString), Functor f) => HasLens "maybe'graphDef" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'graphDef" -> (a -> f b) -> Event -> f Event

                              ((~) * a (Maybe LogMessage), (~) * b (Maybe LogMessage), Functor f) => HasLens "maybe'logMessage" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'logMessage" -> (a -> f b) -> Event -> f Event

                              ((~) * a (Maybe ByteString), (~) * b (Maybe ByteString), Functor f) => HasLens "maybe'metaGraphDef" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'metaGraphDef" -> (a -> f b) -> Event -> f Event

                              ((~) * a (Maybe SessionLog), (~) * b (Maybe SessionLog), Functor f) => HasLens "maybe'sessionLog" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'sessionLog" -> (a -> f b) -> Event -> f Event

                              ((~) * a (Maybe Summary), (~) * b (Maybe Summary), Functor f) => HasLens "maybe'summary" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'summary" -> (a -> f b) -> Event -> f Event

                              ((~) * a (Maybe TaggedRunMetadata), (~) * b (Maybe TaggedRunMetadata), Functor f) => HasLens "maybe'taggedRunMetadata" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'taggedRunMetadata" -> (a -> f b) -> Event -> f Event

                              ((~) * a (Maybe Event'What), (~) * b (Maybe Event'What), Functor f) => HasLens "maybe'what" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'what" -> (a -> f b) -> Event -> f Event

                              ((~) * a ByteString, (~) * b ByteString, Functor f) => HasLens "metaGraphDef" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "metaGraphDef" -> (a -> f b) -> Event -> f Event

                              ((~) * a SessionLog, (~) * b SessionLog, Functor f) => HasLens "sessionLog" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "sessionLog" -> (a -> f b) -> Event -> f Event

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "step" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "step" -> (a -> f b) -> Event -> f Event

                              ((~) * a Summary, (~) * b Summary, Functor f) => HasLens "summary" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "summary" -> (a -> f b) -> Event -> f Event

                              ((~) * a TaggedRunMetadata, (~) * b TaggedRunMetadata, Functor f) => HasLens "taggedRunMetadata" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "taggedRunMetadata" -> (a -> f b) -> Event -> f Event

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "wallTime" f Event Event a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "wallTime" -> (a -> f b) -> Event -> f Event

                              data LogMessage Source #

                              Instances

                              Eq LogMessage Source # 
                              Ord LogMessage Source # 
                              Show LogMessage Source # 
                              Message LogMessage Source # 

                              Methods

                              descriptor :: MessageDescriptor LogMessage

                              Default LogMessage Source # 

                              Methods

                              def :: LogMessage

                              ((~) * a LogMessage'Level, (~) * b LogMessage'Level, Functor f) => HasLens "level" f LogMessage LogMessage a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "level" -> (a -> f b) -> LogMessage -> f LogMessage

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "message" f LogMessage LogMessage a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "message" -> (a -> f b) -> LogMessage -> f LogMessage

                              data LogMessage'Level Source #

                              Instances

                              Bounded LogMessage'Level Source # 
                              Enum LogMessage'Level Source # 
                              Eq LogMessage'Level Source # 
                              Ord LogMessage'Level Source # 
                              Show LogMessage'Level Source # 
                              MessageEnum LogMessage'Level Source # 
                              FieldDefault LogMessage'Level Source # 
                              Default LogMessage'Level Source # 

                              data SessionLog Source #

                              Instances

                              Eq SessionLog Source # 
                              Ord SessionLog Source # 
                              Show SessionLog Source # 
                              Message SessionLog Source # 

                              Methods

                              descriptor :: MessageDescriptor SessionLog

                              Default SessionLog Source # 

                              Methods

                              def :: SessionLog

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "checkpointPath" f SessionLog SessionLog a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "checkpointPath" -> (a -> f b) -> SessionLog -> f SessionLog

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "msg" f SessionLog SessionLog a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "msg" -> (a -> f b) -> SessionLog -> f SessionLog

                              ((~) * a SessionLog'SessionStatus, (~) * b SessionLog'SessionStatus, Functor f) => HasLens "status" f SessionLog SessionLog a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "status" -> (a -> f b) -> SessionLog -> f SessionLog

                              data SessionLog'SessionStatus Source #

                              Instances

                              Bounded SessionLog'SessionStatus Source # 
                              Enum SessionLog'SessionStatus Source # 
                              Eq SessionLog'SessionStatus Source # 
                              Ord SessionLog'SessionStatus Source # 
                              Show SessionLog'SessionStatus Source # 
                              MessageEnum SessionLog'SessionStatus Source # 
                              FieldDefault SessionLog'SessionStatus Source # 
                              Default SessionLog'SessionStatus Source # 

                              data TaggedRunMetadata Source #

                              checkpointPath :: forall f s t a b. HasLens "checkpointPath" f s t a b => LensLike f s t a b Source #

                              fileVersion :: forall f s t a b. HasLens "fileVersion" f s t a b => LensLike f s t a b Source #

                              graphDef :: forall f s t a b. HasLens "graphDef" f s t a b => LensLike f s t a b Source #

                              level :: forall f s t a b. HasLens "level" f s t a b => LensLike f s t a b Source #

                              logMessage :: forall f s t a b. HasLens "logMessage" f s t a b => LensLike f s t a b Source #

                              maybe'fileVersion :: forall f s t a b. HasLens "maybe'fileVersion" f s t a b => LensLike f s t a b Source #

                              maybe'graphDef :: forall f s t a b. HasLens "maybe'graphDef" f s t a b => LensLike f s t a b Source #

                              maybe'logMessage :: forall f s t a b. HasLens "maybe'logMessage" f s t a b => LensLike f s t a b Source #

                              maybe'metaGraphDef :: forall f s t a b. HasLens "maybe'metaGraphDef" f s t a b => LensLike f s t a b Source #

                              maybe'sessionLog :: forall f s t a b. HasLens "maybe'sessionLog" f s t a b => LensLike f s t a b Source #

                              maybe'summary :: forall f s t a b. HasLens "maybe'summary" f s t a b => LensLike f s t a b Source #

                              maybe'taggedRunMetadata :: forall f s t a b. HasLens "maybe'taggedRunMetadata" f s t a b => LensLike f s t a b Source #

                              maybe'what :: forall f s t a b. HasLens "maybe'what" f s t a b => LensLike f s t a b Source #

                              message :: forall f s t a b. HasLens "message" f s t a b => LensLike f s t a b Source #

                              metaGraphDef :: forall f s t a b. HasLens "metaGraphDef" f s t a b => LensLike f s t a b Source #

                              msg :: forall f s t a b. HasLens "msg" f s t a b => LensLike f s t a b Source #

                              runMetadata :: forall f s t a b. HasLens "runMetadata" f s t a b => LensLike f s t a b Source #

                              sessionLog :: forall f s t a b. HasLens "sessionLog" f s t a b => LensLike f s t a b Source #

                              status :: forall f s t a b. HasLens "status" f s t a b => LensLike f s t a b Source #

                              step :: forall f s t a b. HasLens "step" f s t a b => LensLike f s t a b Source #

                              summary :: forall f s t a b. HasLens "summary" f s t a b => LensLike f s t a b Source #

                              tag :: forall f s t a b. HasLens "tag" f s t a b => LensLike f s t a b Source #

                              taggedRunMetadata :: forall f s t a b. HasLens "taggedRunMetadata" f s t a b => LensLike f s t a b Source #

                              wallTime :: forall f s t a b. HasLens "wallTime" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-MemmappedFileSystem.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-MemmappedFileSystem.html new file mode 100644 index 0000000..40589c6 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-MemmappedFileSystem.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Util.MemmappedFileSystem

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Util.MemmappedFileSystem

                              Documentation

                              data MemmappedFileSystemDirectory Source #

                              Instances

                              Eq MemmappedFileSystemDirectory Source # 
                              Ord MemmappedFileSystemDirectory Source # 
                              Show MemmappedFileSystemDirectory Source # 
                              Message MemmappedFileSystemDirectory Source # 

                              Methods

                              descriptor :: MessageDescriptor MemmappedFileSystemDirectory

                              Default MemmappedFileSystemDirectory Source # 
                              ((~) * a [MemmappedFileSystemDirectoryElement], (~) * b [MemmappedFileSystemDirectoryElement], Functor f) => HasLens "element" f MemmappedFileSystemDirectory MemmappedFileSystemDirectory a b Source # 

                              data MemmappedFileSystemDirectoryElement Source #

                              Instances

                              Eq MemmappedFileSystemDirectoryElement Source # 
                              Ord MemmappedFileSystemDirectoryElement Source # 
                              Show MemmappedFileSystemDirectoryElement Source # 
                              Message MemmappedFileSystemDirectoryElement Source # 
                              Default MemmappedFileSystemDirectoryElement Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f MemmappedFileSystemDirectoryElement MemmappedFileSystemDirectoryElement a b Source # 
                              ((~) * a Word64, (~) * b Word64, Functor f) => HasLens "offset" f MemmappedFileSystemDirectoryElement MemmappedFileSystemDirectoryElement a b Source # 

                              element :: forall f s t a b. HasLens "element" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              offset :: forall f s t a b. HasLens "offset" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-SavedTensorSlice.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-SavedTensorSlice.html new file mode 100644 index 0000000..a13614a --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-SavedTensorSlice.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Util.SavedTensorSlice

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Util.SavedTensorSlice

                              Documentation

                              data SavedSlice Source #

                              Instances

                              Eq SavedSlice Source # 
                              Ord SavedSlice Source # 
                              Show SavedSlice Source # 
                              Message SavedSlice Source # 

                              Methods

                              descriptor :: MessageDescriptor SavedSlice

                              Default SavedSlice Source # 

                              Methods

                              def :: SavedSlice

                              ((~) * a TensorProto, (~) * b TensorProto, Functor f) => HasLens "data'" f SavedSlice SavedSlice a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "data'" -> (a -> f b) -> SavedSlice -> f SavedSlice

                              ((~) * a (Maybe TensorProto), (~) * b (Maybe TensorProto), Functor f) => HasLens "maybe'data'" f SavedSlice SavedSlice a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'data'" -> (a -> f b) -> SavedSlice -> f SavedSlice

                              ((~) * a (Maybe TensorSliceProto), (~) * b (Maybe TensorSliceProto), Functor f) => HasLens "maybe'slice" f SavedSlice SavedSlice a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'slice" -> (a -> f b) -> SavedSlice -> f SavedSlice

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f SavedSlice SavedSlice a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> SavedSlice -> f SavedSlice

                              ((~) * a TensorSliceProto, (~) * b TensorSliceProto, Functor f) => HasLens "slice" f SavedSlice SavedSlice a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "slice" -> (a -> f b) -> SavedSlice -> f SavedSlice

                              data SavedSliceMeta Source #

                              Instances

                              Eq SavedSliceMeta Source # 
                              Ord SavedSliceMeta Source # 
                              Show SavedSliceMeta Source # 
                              Message SavedSliceMeta Source # 

                              Methods

                              descriptor :: MessageDescriptor SavedSliceMeta

                              Default SavedSliceMeta Source # 
                              ((~) * a (Maybe TensorShapeProto), (~) * b (Maybe TensorShapeProto), Functor f) => HasLens "maybe'shape" f SavedSliceMeta SavedSliceMeta a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'shape" -> (a -> f b) -> SavedSliceMeta -> f SavedSliceMeta

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f SavedSliceMeta SavedSliceMeta a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> SavedSliceMeta -> f SavedSliceMeta

                              ((~) * a TensorShapeProto, (~) * b TensorShapeProto, Functor f) => HasLens "shape" f SavedSliceMeta SavedSliceMeta a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "shape" -> (a -> f b) -> SavedSliceMeta -> f SavedSliceMeta

                              ((~) * a [TensorSliceProto], (~) * b [TensorSliceProto], Functor f) => HasLens "slice" f SavedSliceMeta SavedSliceMeta a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "slice" -> (a -> f b) -> SavedSliceMeta -> f SavedSliceMeta

                              ((~) * a DataType, (~) * b DataType, Functor f) => HasLens "type'" f SavedSliceMeta SavedSliceMeta a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "type'" -> (a -> f b) -> SavedSliceMeta -> f SavedSliceMeta

                              data SavedTensorSliceMeta Source #

                              Instances

                              Eq SavedTensorSliceMeta Source # 
                              Ord SavedTensorSliceMeta Source # 
                              Show SavedTensorSliceMeta Source # 
                              Message SavedTensorSliceMeta Source # 

                              Methods

                              descriptor :: MessageDescriptor SavedTensorSliceMeta

                              Default SavedTensorSliceMeta Source # 
                              ((~) * a (Maybe VersionDef), (~) * b (Maybe VersionDef), Functor f) => HasLens "maybe'versions" f SavedTensorSliceMeta SavedTensorSliceMeta a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'versions" -> (a -> f b) -> SavedTensorSliceMeta -> f SavedTensorSliceMeta

                              ((~) * a [SavedSliceMeta], (~) * b [SavedSliceMeta], Functor f) => HasLens "tensor" f SavedTensorSliceMeta SavedTensorSliceMeta a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "tensor" -> (a -> f b) -> SavedTensorSliceMeta -> f SavedTensorSliceMeta

                              ((~) * a VersionDef, (~) * b VersionDef, Functor f) => HasLens "versions" f SavedTensorSliceMeta SavedTensorSliceMeta a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "versions" -> (a -> f b) -> SavedTensorSliceMeta -> f SavedTensorSliceMeta

                              data SavedTensorSlices Source #

                              Instances

                              Eq SavedTensorSlices Source # 
                              Ord SavedTensorSlices Source # 
                              Show SavedTensorSlices Source # 
                              Message SavedTensorSlices Source # 

                              Methods

                              descriptor :: MessageDescriptor SavedTensorSlices

                              Default SavedTensorSlices Source # 
                              ((~) * a SavedSlice, (~) * b SavedSlice, Functor f) => HasLens "data'" f SavedTensorSlices SavedTensorSlices a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "data'" -> (a -> f b) -> SavedTensorSlices -> f SavedTensorSlices

                              ((~) * a (Maybe SavedSlice), (~) * b (Maybe SavedSlice), Functor f) => HasLens "maybe'data'" f SavedTensorSlices SavedTensorSlices a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'data'" -> (a -> f b) -> SavedTensorSlices -> f SavedTensorSlices

                              ((~) * a (Maybe SavedTensorSliceMeta), (~) * b (Maybe SavedTensorSliceMeta), Functor f) => HasLens "maybe'meta" f SavedTensorSlices SavedTensorSlices a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'meta" -> (a -> f b) -> SavedTensorSlices -> f SavedTensorSlices

                              ((~) * a SavedTensorSliceMeta, (~) * b SavedTensorSliceMeta, Functor f) => HasLens "meta" f SavedTensorSlices SavedTensorSlices a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "meta" -> (a -> f b) -> SavedTensorSlices -> f SavedTensorSlices

                              data' :: forall f s t a b. HasLens "data'" f s t a b => LensLike f s t a b Source #

                              maybe'data' :: forall f s t a b. HasLens "maybe'data'" f s t a b => LensLike f s t a b Source #

                              maybe'meta :: forall f s t a b. HasLens "maybe'meta" f s t a b => LensLike f s t a b Source #

                              maybe'shape :: forall f s t a b. HasLens "maybe'shape" f s t a b => LensLike f s t a b Source #

                              maybe'slice :: forall f s t a b. HasLens "maybe'slice" f s t a b => LensLike f s t a b Source #

                              maybe'versions :: forall f s t a b. HasLens "maybe'versions" f s t a b => LensLike f s t a b Source #

                              meta :: forall f s t a b. HasLens "meta" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              shape :: forall f s t a b. HasLens "shape" f s t a b => LensLike f s t a b Source #

                              slice :: forall f s t a b. HasLens "slice" f s t a b => LensLike f s t a b Source #

                              tensor :: forall f s t a b. HasLens "tensor" f s t a b => LensLike f s t a b Source #

                              type' :: forall f s t a b. HasLens "type'" f s t a b => LensLike f s t a b Source #

                              versions :: forall f s t a b. HasLens "versions" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-TestLog.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-TestLog.html new file mode 100644 index 0000000..98ba81b --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-TestLog.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Util.TestLog

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Safe HaskellNone
                              LanguageHaskell2010

                              Proto.Tensorflow.Core.Util.TestLog

                              Documentation

                              data AvailableDeviceInfo Source #

                              Instances

                              Eq AvailableDeviceInfo Source # 
                              Ord AvailableDeviceInfo Source # 
                              Show AvailableDeviceInfo Source # 
                              Message AvailableDeviceInfo Source # 

                              Methods

                              descriptor :: MessageDescriptor AvailableDeviceInfo

                              Default AvailableDeviceInfo Source # 
                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "memoryLimit" f AvailableDeviceInfo AvailableDeviceInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "memoryLimit" -> (a -> f b) -> AvailableDeviceInfo -> f AvailableDeviceInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f AvailableDeviceInfo AvailableDeviceInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> AvailableDeviceInfo -> f AvailableDeviceInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "physicalDescription" f AvailableDeviceInfo AvailableDeviceInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "physicalDescription" -> (a -> f b) -> AvailableDeviceInfo -> f AvailableDeviceInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "type'" f AvailableDeviceInfo AvailableDeviceInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "type'" -> (a -> f b) -> AvailableDeviceInfo -> f AvailableDeviceInfo

                              data BenchmarkEntry Source #

                              Instances

                              Eq BenchmarkEntry Source # 
                              Ord BenchmarkEntry Source # 
                              Show BenchmarkEntry Source # 
                              Message BenchmarkEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor BenchmarkEntry

                              Default BenchmarkEntry Source # 
                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "cpuTime" f BenchmarkEntry BenchmarkEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "cpuTime" -> (a -> f b) -> BenchmarkEntry -> f BenchmarkEntry

                              ((~) * a (Map Text EntryValue), (~) * b (Map Text EntryValue), Functor f) => HasLens "extras" f BenchmarkEntry BenchmarkEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "extras" -> (a -> f b) -> BenchmarkEntry -> f BenchmarkEntry

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "iters" f BenchmarkEntry BenchmarkEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "iters" -> (a -> f b) -> BenchmarkEntry -> f BenchmarkEntry

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f BenchmarkEntry BenchmarkEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> BenchmarkEntry -> f BenchmarkEntry

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "throughput" f BenchmarkEntry BenchmarkEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "throughput" -> (a -> f b) -> BenchmarkEntry -> f BenchmarkEntry

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "wallTime" f BenchmarkEntry BenchmarkEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "wallTime" -> (a -> f b) -> BenchmarkEntry -> f BenchmarkEntry

                              data BenchmarkEntry'ExtrasEntry Source #

                              Instances

                              Eq BenchmarkEntry'ExtrasEntry Source # 
                              Ord BenchmarkEntry'ExtrasEntry Source # 
                              Show BenchmarkEntry'ExtrasEntry Source # 
                              Message BenchmarkEntry'ExtrasEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor BenchmarkEntry'ExtrasEntry

                              Default BenchmarkEntry'ExtrasEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f BenchmarkEntry'ExtrasEntry BenchmarkEntry'ExtrasEntry a b Source # 
                              ((~) * a (Maybe EntryValue), (~) * b (Maybe EntryValue), Functor f) => HasLens "maybe'value" f BenchmarkEntry'ExtrasEntry BenchmarkEntry'ExtrasEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'value" -> (a -> f b) -> BenchmarkEntry'ExtrasEntry -> f BenchmarkEntry'ExtrasEntry

                              ((~) * a EntryValue, (~) * b EntryValue, Functor f) => HasLens "value" f BenchmarkEntry'ExtrasEntry BenchmarkEntry'ExtrasEntry a b Source # 

                              data BuildConfiguration Source #

                              Instances

                              Eq BuildConfiguration Source # 
                              Ord BuildConfiguration Source # 
                              Show BuildConfiguration Source # 
                              Message BuildConfiguration Source # 

                              Methods

                              descriptor :: MessageDescriptor BuildConfiguration

                              Default BuildConfiguration Source # 
                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "ccFlags" f BuildConfiguration BuildConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "ccFlags" -> (a -> f b) -> BuildConfiguration -> f BuildConfiguration

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "mode" f BuildConfiguration BuildConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "mode" -> (a -> f b) -> BuildConfiguration -> f BuildConfiguration

                              ((~) * a [Text], (~) * b [Text], Functor f) => HasLens "opts" f BuildConfiguration BuildConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "opts" -> (a -> f b) -> BuildConfiguration -> f BuildConfiguration

                              data CPUInfo Source #

                              Instances

                              Eq CPUInfo Source # 

                              Methods

                              (==) :: CPUInfo -> CPUInfo -> Bool #

                              (/=) :: CPUInfo -> CPUInfo -> Bool #

                              Ord CPUInfo Source # 
                              Show CPUInfo Source # 
                              Message CPUInfo Source # 

                              Methods

                              descriptor :: MessageDescriptor CPUInfo

                              Default CPUInfo Source # 

                              Methods

                              def :: CPUInfo

                              ((~) * a (Map Text Int64), (~) * b (Map Text Int64), Functor f) => HasLens "cacheSize" f CPUInfo CPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "cacheSize" -> (a -> f b) -> CPUInfo -> f CPUInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "cpuGovernor" f CPUInfo CPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "cpuGovernor" -> (a -> f b) -> CPUInfo -> f CPUInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "cpuInfo" f CPUInfo CPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "cpuInfo" -> (a -> f b) -> CPUInfo -> f CPUInfo

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "mhzPerCpu" f CPUInfo CPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "mhzPerCpu" -> (a -> f b) -> CPUInfo -> f CPUInfo

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "numCores" f CPUInfo CPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "numCores" -> (a -> f b) -> CPUInfo -> f CPUInfo

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "numCoresAllowed" f CPUInfo CPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "numCoresAllowed" -> (a -> f b) -> CPUInfo -> f CPUInfo

                              data CPUInfo'CacheSizeEntry Source #

                              Instances

                              Eq CPUInfo'CacheSizeEntry Source # 
                              Ord CPUInfo'CacheSizeEntry Source # 
                              Show CPUInfo'CacheSizeEntry Source # 
                              Message CPUInfo'CacheSizeEntry Source # 

                              Methods

                              descriptor :: MessageDescriptor CPUInfo'CacheSizeEntry

                              Default CPUInfo'CacheSizeEntry Source # 
                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "key" f CPUInfo'CacheSizeEntry CPUInfo'CacheSizeEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "key" -> (a -> f b) -> CPUInfo'CacheSizeEntry -> f CPUInfo'CacheSizeEntry

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "value" f CPUInfo'CacheSizeEntry CPUInfo'CacheSizeEntry a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "value" -> (a -> f b) -> CPUInfo'CacheSizeEntry -> f CPUInfo'CacheSizeEntry

                              data CommitId Source #

                              Constructors

                              CommitId 

                              Instances

                              Eq CommitId Source # 
                              Ord CommitId Source # 
                              Show CommitId Source # 
                              Message CommitId Source # 

                              Methods

                              descriptor :: MessageDescriptor CommitId

                              Default CommitId Source # 

                              Methods

                              def :: CommitId

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "changelist" f CommitId CommitId a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "changelist" -> (a -> f b) -> CommitId -> f CommitId

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "hash" f CommitId CommitId a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hash" -> (a -> f b) -> CommitId -> f CommitId

                              ((~) * a (Maybe Int64), (~) * b (Maybe Int64), Functor f) => HasLens "maybe'changelist" f CommitId CommitId a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'changelist" -> (a -> f b) -> CommitId -> f CommitId

                              ((~) * a (Maybe Text), (~) * b (Maybe Text), Functor f) => HasLens "maybe'hash" f CommitId CommitId a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'hash" -> (a -> f b) -> CommitId -> f CommitId

                              ((~) * a (Maybe CommitId'Kind), (~) * b (Maybe CommitId'Kind), Functor f) => HasLens "maybe'kind" f CommitId CommitId a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'kind" -> (a -> f b) -> CommitId -> f CommitId

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "snapshot" f CommitId CommitId a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "snapshot" -> (a -> f b) -> CommitId -> f CommitId

                              data EntryValue Source #

                              Instances

                              Eq EntryValue Source # 
                              Ord EntryValue Source # 
                              Show EntryValue Source # 
                              Message EntryValue Source # 

                              Methods

                              descriptor :: MessageDescriptor EntryValue

                              Default EntryValue Source # 

                              Methods

                              def :: EntryValue

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "doubleValue" f EntryValue EntryValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "doubleValue" -> (a -> f b) -> EntryValue -> f EntryValue

                              ((~) * a (Maybe Double), (~) * b (Maybe Double), Functor f) => HasLens "maybe'doubleValue" f EntryValue EntryValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'doubleValue" -> (a -> f b) -> EntryValue -> f EntryValue

                              ((~) * a (Maybe EntryValue'Kind), (~) * b (Maybe EntryValue'Kind), Functor f) => HasLens "maybe'kind" f EntryValue EntryValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'kind" -> (a -> f b) -> EntryValue -> f EntryValue

                              ((~) * a (Maybe Text), (~) * b (Maybe Text), Functor f) => HasLens "maybe'stringValue" f EntryValue EntryValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'stringValue" -> (a -> f b) -> EntryValue -> f EntryValue

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "stringValue" f EntryValue EntryValue a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "stringValue" -> (a -> f b) -> EntryValue -> f EntryValue

                              data GPUInfo Source #

                              Constructors

                              GPUInfo 

                              Fields

                              Instances

                              Eq GPUInfo Source # 

                              Methods

                              (==) :: GPUInfo -> GPUInfo -> Bool #

                              (/=) :: GPUInfo -> GPUInfo -> Bool #

                              Ord GPUInfo Source # 
                              Show GPUInfo Source # 
                              Message GPUInfo Source # 

                              Methods

                              descriptor :: MessageDescriptor GPUInfo

                              Default GPUInfo Source # 

                              Methods

                              def :: GPUInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "busId" f GPUInfo GPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "busId" -> (a -> f b) -> GPUInfo -> f GPUInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "model" f GPUInfo GPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "model" -> (a -> f b) -> GPUInfo -> f GPUInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "uuid" f GPUInfo GPUInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "uuid" -> (a -> f b) -> GPUInfo -> f GPUInfo

                              data MachineConfiguration Source #

                              Instances

                              Eq MachineConfiguration Source # 
                              Ord MachineConfiguration Source # 
                              Show MachineConfiguration Source # 
                              Message MachineConfiguration Source # 

                              Methods

                              descriptor :: MessageDescriptor MachineConfiguration

                              Default MachineConfiguration Source # 
                              ((~) * a [AvailableDeviceInfo], (~) * b [AvailableDeviceInfo], Functor f) => HasLens "availableDeviceInfo" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "availableDeviceInfo" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a CPUInfo, (~) * b CPUInfo, Functor f) => HasLens "cpuInfo" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "cpuInfo" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a [Any], (~) * b [Any], Functor f) => HasLens "deviceInfo" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "deviceInfo" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "hostname" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "hostname" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a (Maybe CPUInfo), (~) * b (Maybe CPUInfo), Functor f) => HasLens "maybe'cpuInfo" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'cpuInfo" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a (Maybe MemoryInfo), (~) * b (Maybe MemoryInfo), Functor f) => HasLens "maybe'memoryInfo" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'memoryInfo" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a (Maybe PlatformInfo), (~) * b (Maybe PlatformInfo), Functor f) => HasLens "maybe'platformInfo" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'platformInfo" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a MemoryInfo, (~) * b MemoryInfo, Functor f) => HasLens "memoryInfo" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "memoryInfo" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a PlatformInfo, (~) * b PlatformInfo, Functor f) => HasLens "platformInfo" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "platformInfo" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "serialIdentifier" f MachineConfiguration MachineConfiguration a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "serialIdentifier" -> (a -> f b) -> MachineConfiguration -> f MachineConfiguration

                              data MemoryInfo Source #

                              Instances

                              Eq MemoryInfo Source # 
                              Ord MemoryInfo Source # 
                              Show MemoryInfo Source # 
                              Message MemoryInfo Source # 

                              Methods

                              descriptor :: MessageDescriptor MemoryInfo

                              Default MemoryInfo Source # 

                              Methods

                              def :: MemoryInfo

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "available" f MemoryInfo MemoryInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "available" -> (a -> f b) -> MemoryInfo -> f MemoryInfo

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "total" f MemoryInfo MemoryInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "total" -> (a -> f b) -> MemoryInfo -> f MemoryInfo

                              data PlatformInfo Source #

                              Instances

                              Eq PlatformInfo Source # 
                              Ord PlatformInfo Source # 
                              Show PlatformInfo Source # 
                              Message PlatformInfo Source # 

                              Methods

                              descriptor :: MessageDescriptor PlatformInfo

                              Default PlatformInfo Source # 

                              Methods

                              def :: PlatformInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "bits" f PlatformInfo PlatformInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "bits" -> (a -> f b) -> PlatformInfo -> f PlatformInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "linkage" f PlatformInfo PlatformInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "linkage" -> (a -> f b) -> PlatformInfo -> f PlatformInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "machine" f PlatformInfo PlatformInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "machine" -> (a -> f b) -> PlatformInfo -> f PlatformInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "release" f PlatformInfo PlatformInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "release" -> (a -> f b) -> PlatformInfo -> f PlatformInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "system" f PlatformInfo PlatformInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "system" -> (a -> f b) -> PlatformInfo -> f PlatformInfo

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "version" f PlatformInfo PlatformInfo a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "version" -> (a -> f b) -> PlatformInfo -> f PlatformInfo

                              data TestResults Source #

                              Instances

                              Eq TestResults Source # 
                              Ord TestResults Source # 
                              Show TestResults Source # 
                              Message TestResults Source # 

                              Methods

                              descriptor :: MessageDescriptor TestResults

                              Default TestResults Source # 

                              Methods

                              def :: TestResults

                              ((~) * a TestResults'BenchmarkType, (~) * b TestResults'BenchmarkType, Functor f) => HasLens "benchmarkType" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "benchmarkType" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a BuildConfiguration, (~) * b BuildConfiguration, Functor f) => HasLens "buildConfiguration" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "buildConfiguration" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a CommitId, (~) * b CommitId, Functor f) => HasLens "commitId" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "commitId" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a BenchmarkEntries, (~) * b BenchmarkEntries, Functor f) => HasLens "entries" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "entries" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a MachineConfiguration, (~) * b MachineConfiguration, Functor f) => HasLens "machineConfiguration" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "machineConfiguration" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a (Maybe BuildConfiguration), (~) * b (Maybe BuildConfiguration), Functor f) => HasLens "maybe'buildConfiguration" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'buildConfiguration" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a (Maybe CommitId), (~) * b (Maybe CommitId), Functor f) => HasLens "maybe'commitId" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'commitId" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a (Maybe BenchmarkEntries), (~) * b (Maybe BenchmarkEntries), Functor f) => HasLens "maybe'entries" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'entries" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a (Maybe MachineConfiguration), (~) * b (Maybe MachineConfiguration), Functor f) => HasLens "maybe'machineConfiguration" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'machineConfiguration" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a (Maybe RunConfiguration), (~) * b (Maybe RunConfiguration), Functor f) => HasLens "maybe'runConfiguration" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "maybe'runConfiguration" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "name" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "name" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a RunConfiguration, (~) * b RunConfiguration, Functor f) => HasLens "runConfiguration" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "runConfiguration" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "runMode" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "runMode" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a Double, (~) * b Double, Functor f) => HasLens "runTime" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "runTime" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a Int64, (~) * b Int64, Functor f) => HasLens "startTime" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "startTime" -> (a -> f b) -> TestResults -> f TestResults

                              ((~) * a Text, (~) * b Text, Functor f) => HasLens "target" f TestResults TestResults a b Source # 

                              Methods

                              lensOf :: Proxy# Symbol "target" -> (a -> f b) -> TestResults -> f TestResults

                              data TestResults'BenchmarkType Source #

                              Instances

                              Bounded TestResults'BenchmarkType Source # 
                              Enum TestResults'BenchmarkType Source # 
                              Eq TestResults'BenchmarkType Source # 
                              Ord TestResults'BenchmarkType Source # 
                              Show TestResults'BenchmarkType Source # 
                              MessageEnum TestResults'BenchmarkType Source # 
                              FieldDefault TestResults'BenchmarkType Source # 
                              Default TestResults'BenchmarkType Source # 

                              argument :: forall f s t a b. HasLens "argument" f s t a b => LensLike f s t a b Source #

                              available :: forall f s t a b. HasLens "available" f s t a b => LensLike f s t a b Source #

                              availableDeviceInfo :: forall f s t a b. HasLens "availableDeviceInfo" f s t a b => LensLike f s t a b Source #

                              benchmarkType :: forall f s t a b. HasLens "benchmarkType" f s t a b => LensLike f s t a b Source #

                              bits :: forall f s t a b. HasLens "bits" f s t a b => LensLike f s t a b Source #

                              buildConfiguration :: forall f s t a b. HasLens "buildConfiguration" f s t a b => LensLike f s t a b Source #

                              busId :: forall f s t a b. HasLens "busId" f s t a b => LensLike f s t a b Source #

                              cacheSize :: forall f s t a b. HasLens "cacheSize" f s t a b => LensLike f s t a b Source #

                              ccFlags :: forall f s t a b. HasLens "ccFlags" f s t a b => LensLike f s t a b Source #

                              changelist :: forall f s t a b. HasLens "changelist" f s t a b => LensLike f s t a b Source #

                              commitId :: forall f s t a b. HasLens "commitId" f s t a b => LensLike f s t a b Source #

                              cpuGovernor :: forall f s t a b. HasLens "cpuGovernor" f s t a b => LensLike f s t a b Source #

                              cpuInfo :: forall f s t a b. HasLens "cpuInfo" f s t a b => LensLike f s t a b Source #

                              cpuTime :: forall f s t a b. HasLens "cpuTime" f s t a b => LensLike f s t a b Source #

                              deviceInfo :: forall f s t a b. HasLens "deviceInfo" f s t a b => LensLike f s t a b Source #

                              doubleValue :: forall f s t a b. HasLens "doubleValue" f s t a b => LensLike f s t a b Source #

                              entries :: forall f s t a b. HasLens "entries" f s t a b => LensLike f s t a b Source #

                              entry :: forall f s t a b. HasLens "entry" f s t a b => LensLike f s t a b Source #

                              extras :: forall f s t a b. HasLens "extras" f s t a b => LensLike f s t a b Source #

                              hash :: forall f s t a b. HasLens "hash" f s t a b => LensLike f s t a b Source #

                              hostname :: forall f s t a b. HasLens "hostname" f s t a b => LensLike f s t a b Source #

                              iters :: forall f s t a b. HasLens "iters" f s t a b => LensLike f s t a b Source #

                              key :: forall f s t a b. HasLens "key" f s t a b => LensLike f s t a b Source #

                              linkage :: forall f s t a b. HasLens "linkage" f s t a b => LensLike f s t a b Source #

                              machine :: forall f s t a b. HasLens "machine" f s t a b => LensLike f s t a b Source #

                              machineConfiguration :: forall f s t a b. HasLens "machineConfiguration" f s t a b => LensLike f s t a b Source #

                              maybe'buildConfiguration :: forall f s t a b. HasLens "maybe'buildConfiguration" f s t a b => LensLike f s t a b Source #

                              maybe'changelist :: forall f s t a b. HasLens "maybe'changelist" f s t a b => LensLike f s t a b Source #

                              maybe'commitId :: forall f s t a b. HasLens "maybe'commitId" f s t a b => LensLike f s t a b Source #

                              maybe'cpuInfo :: forall f s t a b. HasLens "maybe'cpuInfo" f s t a b => LensLike f s t a b Source #

                              maybe'doubleValue :: forall f s t a b. HasLens "maybe'doubleValue" f s t a b => LensLike f s t a b Source #

                              maybe'entries :: forall f s t a b. HasLens "maybe'entries" f s t a b => LensLike f s t a b Source #

                              maybe'hash :: forall f s t a b. HasLens "maybe'hash" f s t a b => LensLike f s t a b Source #

                              maybe'kind :: forall f s t a b. HasLens "maybe'kind" f s t a b => LensLike f s t a b Source #

                              maybe'machineConfiguration :: forall f s t a b. HasLens "maybe'machineConfiguration" f s t a b => LensLike f s t a b Source #

                              maybe'memoryInfo :: forall f s t a b. HasLens "maybe'memoryInfo" f s t a b => LensLike f s t a b Source #

                              maybe'platformInfo :: forall f s t a b. HasLens "maybe'platformInfo" f s t a b => LensLike f s t a b Source #

                              maybe'runConfiguration :: forall f s t a b. HasLens "maybe'runConfiguration" f s t a b => LensLike f s t a b Source #

                              maybe'stringValue :: forall f s t a b. HasLens "maybe'stringValue" f s t a b => LensLike f s t a b Source #

                              maybe'value :: forall f s t a b. HasLens "maybe'value" f s t a b => LensLike f s t a b Source #

                              memoryInfo :: forall f s t a b. HasLens "memoryInfo" f s t a b => LensLike f s t a b Source #

                              memoryLimit :: forall f s t a b. HasLens "memoryLimit" f s t a b => LensLike f s t a b Source #

                              mhzPerCpu :: forall f s t a b. HasLens "mhzPerCpu" f s t a b => LensLike f s t a b Source #

                              mode :: forall f s t a b. HasLens "mode" f s t a b => LensLike f s t a b Source #

                              model :: forall f s t a b. HasLens "model" f s t a b => LensLike f s t a b Source #

                              name :: forall f s t a b. HasLens "name" f s t a b => LensLike f s t a b Source #

                              numCores :: forall f s t a b. HasLens "numCores" f s t a b => LensLike f s t a b Source #

                              numCoresAllowed :: forall f s t a b. HasLens "numCoresAllowed" f s t a b => LensLike f s t a b Source #

                              opts :: forall f s t a b. HasLens "opts" f s t a b => LensLike f s t a b Source #

                              physicalDescription :: forall f s t a b. HasLens "physicalDescription" f s t a b => LensLike f s t a b Source #

                              platformInfo :: forall f s t a b. HasLens "platformInfo" f s t a b => LensLike f s t a b Source #

                              release :: forall f s t a b. HasLens "release" f s t a b => LensLike f s t a b Source #

                              runConfiguration :: forall f s t a b. HasLens "runConfiguration" f s t a b => LensLike f s t a b Source #

                              runMode :: forall f s t a b. HasLens "runMode" f s t a b => LensLike f s t a b Source #

                              runTime :: forall f s t a b. HasLens "runTime" f s t a b => LensLike f s t a b Source #

                              serialIdentifier :: forall f s t a b. HasLens "serialIdentifier" f s t a b => LensLike f s t a b Source #

                              snapshot :: forall f s t a b. HasLens "snapshot" f s t a b => LensLike f s t a b Source #

                              startTime :: forall f s t a b. HasLens "startTime" f s t a b => LensLike f s t a b Source #

                              stringValue :: forall f s t a b. HasLens "stringValue" f s t a b => LensLike f s t a b Source #

                              system :: forall f s t a b. HasLens "system" f s t a b => LensLike f s t a b Source #

                              target :: forall f s t a b. HasLens "target" f s t a b => LensLike f s t a b Source #

                              throughput :: forall f s t a b. HasLens "throughput" f s t a b => LensLike f s t a b Source #

                              total :: forall f s t a b. HasLens "total" f s t a b => LensLike f s t a b Source #

                              type' :: forall f s t a b. HasLens "type'" f s t a b => LensLike f s t a b Source #

                              uuid :: forall f s t a b. HasLens "uuid" f s t a b => LensLike f s t a b Source #

                              value :: forall f s t a b. HasLens "value" f s t a b => LensLike f s t a b Source #

                              version :: forall f s t a b. HasLens "version" f s t a b => LensLike f s t a b Source #

                              wallTime :: forall f s t a b. HasLens "wallTime" f s t a b => LensLike f s t a b Source #

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html index 950a75a..97d06da 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - _)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - _

                              _AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
                              _ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
                              _Event'fileVersionProto.Tensorflow.Core.Util.Event
                              _Event'graphDefProto.Tensorflow.Core.Util.Event
                              _Event'logMessageProto.Tensorflow.Core.Util.Event
                              _Event'metaGraphDefProto.Tensorflow.Core.Util.Event
                              _Event'sessionLogProto.Tensorflow.Core.Util.Event
                              _Event'stepProto.Tensorflow.Core.Util.Event
                              _Event'summaryProto.Tensorflow.Core.Util.Event
                              _Event'taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              _Event'wallTimeProto.Tensorflow.Core.Util.Event
                              _GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
                              _GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'versionProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
                              _GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
                              _HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'minProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'numProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
                              _LogMessage'levelProto.Tensorflow.Core.Util.Event
                              _LogMessage'messageProto.Tensorflow.Core.Util.Event
                              _NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
                              _NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
                              _OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'attrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
                              _OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
                              _OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
                              _OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
                              _OpList'opProto.Tensorflow.Core.Framework.OpDef
                              _OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
                              _ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
                              _RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
                              _SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
                              _SessionLog'msgProto.Tensorflow.Core.Util.Event
                              _SessionLog'statusProto.Tensorflow.Core.Util.Event
                              _Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
                              _Summary'valueProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'audioProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'histoProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'imageProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'simpleValueProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'tensorProto.Tensorflow.Core.Framework.Summary
                              _SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
                              _TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
                              _TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
                              _TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
                              _TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
                              _ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - _

                              _AllocationDescription'allocatedBytesProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'allocationIdProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'allocatorNameProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'hasSingleReferenceProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'ptrProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'requestedBytesProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocatorMemoryUsed'allocatorBytesInUseProto.Tensorflow.Core.Framework.StepStats
                              _AllocatorMemoryUsed'allocatorNameProto.Tensorflow.Core.Framework.StepStats
                              _AllocatorMemoryUsed'liveBytesProto.Tensorflow.Core.Framework.StepStats
                              _AllocatorMemoryUsed'peakBytesProto.Tensorflow.Core.Framework.StepStats
                              _AllocatorMemoryUsed'totalBytesProto.Tensorflow.Core.Framework.StepStats
                              _AssetFileDef'filenameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _AssetFileDef'tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              _AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'valueProto.Tensorflow.Core.Framework.AttrValue
                              _AutoParallelOptions'enableProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _AutoParallelOptions'numReplicasProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _AvailableDeviceInfo'memoryLimitProto.Tensorflow.Core.Util.TestLog
                              _AvailableDeviceInfo'nameProto.Tensorflow.Core.Util.TestLog
                              _AvailableDeviceInfo'physicalDescriptionProto.Tensorflow.Core.Util.TestLog
                              _AvailableDeviceInfo'type'Proto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntries'entryProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'cpuTimeProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'extrasProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'ExtrasEntry'keyProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'ExtrasEntry'valueProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'itersProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'nameProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'throughputProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'wallTimeProto.Tensorflow.Core.Util.TestLog
                              _BuildConfiguration'ccFlagsProto.Tensorflow.Core.Util.TestLog
                              _BuildConfiguration'modeProto.Tensorflow.Core.Util.TestLog
                              _BuildConfiguration'optsProto.Tensorflow.Core.Util.TestLog
                              _BundleEntryProto'crc32cProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'dtypeProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'offsetProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'shapeProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'shardIdProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'sizeProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'slicesProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleHeaderProto'endiannessProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleHeaderProto'numShardsProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleHeaderProto'versionProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BytesList'valueProto.Tensorflow.Core.Example.Feature
                              _ClusterDef'jobProto.Tensorflow.Core.Protobuf.Cluster
                              _CollectionDef'AnyList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'BytesList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'FloatList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'Int64List'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'kindProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'NodeList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CommitId'kindProto.Tensorflow.Core.Util.TestLog
                              _CommitId'snapshotProto.Tensorflow.Core.Util.TestLog
                              _CondContextDef'branchProto.Tensorflow.Core.Protobuf.ControlFlow
                              _CondContextDef'contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _CondContextDef'pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _CondContextDef'predNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _CondContextDef'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
                              _ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'clusterDefProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
                              _CostGraphDef'nodeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'computeCostProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'computeTimeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'controlInputProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'deviceProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'devicePersistentMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'deviceTempMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'hostPersistentMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'hostTempMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'idProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'inputInfoProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'InputInfo'precedingNodeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'InputInfo'precedingPortProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'isFinalProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'memoryTimeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'nameProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'outputInfoProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'OutputInfo'aliasInputPortProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'OutputInfo'dtypeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'OutputInfo'shapeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'OutputInfo'sizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'temporaryMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CPUInfo'cacheSizeProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'CacheSizeEntry'keyProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'CacheSizeEntry'valueProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'cpuGovernorProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'cpuInfoProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'mhzPerCpuProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'numCoresProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'numCoresAllowedProto.Tensorflow.Core.Util.TestLog
                              _DebugOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Debug
                              _DebugOptions'globalStepProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'tolerateDebugOpCreationFailuresProto.Tensorflow.Core.Protobuf.Debug
                              _DeviceAttributes'deviceTypeProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'incarnationProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'localityProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'memoryLimitProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'nameProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'physicalDeviceDescProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceLocality'busIdProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceStepStats'deviceProto.Tensorflow.Core.Framework.StepStats
                              _DeviceStepStats'nodeStatsProto.Tensorflow.Core.Framework.StepStats
                              _EntryValue'kindProto.Tensorflow.Core.Util.TestLog
                              _Event'stepProto.Tensorflow.Core.Util.Event
                              _Event'wallTimeProto.Tensorflow.Core.Util.Event
                              _Event'whatProto.Tensorflow.Core.Util.Event
                              _Example'featuresProto.Tensorflow.Core.Example.Example
                              _ExampleParserConfiguration'featureMapProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _ExampleParserConfiguration'FeatureMapEntry'keyProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _ExampleParserConfiguration'FeatureMapEntry'valueProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _Feature'kindProto.Tensorflow.Core.Example.Feature
                              _FeatureConfiguration'configProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FeatureList'featureProto.Tensorflow.Core.Example.Feature
                              _FeatureLists'featureListProto.Tensorflow.Core.Example.Feature
                              _FeatureLists'FeatureListEntry'keyProto.Tensorflow.Core.Example.Feature
                              _FeatureLists'FeatureListEntry'valueProto.Tensorflow.Core.Example.Feature
                              _Features'featureProto.Tensorflow.Core.Example.Feature
                              _Features'FeatureEntry'keyProto.Tensorflow.Core.Example.Feature
                              _Features'FeatureEntry'valueProto.Tensorflow.Core.Example.Feature
                              _FixedLenFeatureProto'defaultValueProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FixedLenFeatureProto'dtypeProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FixedLenFeatureProto'shapeProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FixedLenFeatureProto'valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FloatList'valueProto.Tensorflow.Core.Example.Feature
                              _FunctionDef'attrProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'AttrEntry'keyProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'AttrEntry'valueProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'nodeDefProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'retProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'RetEntry'keyProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'RetEntry'valueProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'signatureProto.Tensorflow.Core.Framework.Function
                              _FunctionDefLibrary'functionProto.Tensorflow.Core.Framework.Function
                              _FunctionDefLibrary'gradientProto.Tensorflow.Core.Framework.Function
                              _GPUInfo'busIdProto.Tensorflow.Core.Util.TestLog
                              _GPUInfo'modelProto.Tensorflow.Core.Util.TestLog
                              _GPUInfo'uuidProto.Tensorflow.Core.Util.TestLog
                              _GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'forceGpuCompatibleProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'pollingActiveDelayUsecsProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'pollingInactiveDelayMsecsProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
                              _GradientDef'functionNameProto.Tensorflow.Core.Framework.Function
                              _GradientDef'gradientFuncProto.Tensorflow.Core.Framework.Function
                              _GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'versionProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
                              _GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
                              _HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'minProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'numProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
                              _Int64List'valueProto.Tensorflow.Core.Example.Feature
                              _JobDef'nameProto.Tensorflow.Core.Protobuf.Cluster
                              _JobDef'tasksProto.Tensorflow.Core.Protobuf.Cluster
                              _JobDef'TasksEntry'keyProto.Tensorflow.Core.Protobuf.Cluster
                              _JobDef'TasksEntry'valueProto.Tensorflow.Core.Protobuf.Cluster
                              _KernelDef'AttrConstraint'allowedValuesProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'AttrConstraint'nameProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'constraintProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'deviceTypeProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'hostMemoryArgProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'labelProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'opProto.Tensorflow.Core.Framework.KernelDef
                              _LogMessage'levelProto.Tensorflow.Core.Util.Event
                              _LogMessage'messageProto.Tensorflow.Core.Util.Event
                              _MachineConfiguration'availableDeviceInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'cpuInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'deviceInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'hostnameProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'memoryInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'platformInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'serialIdentifierProto.Tensorflow.Core.Util.TestLog
                              _MemmappedFileSystemDirectory'elementProto.Tensorflow.Core.Util.MemmappedFileSystem
                              _MemmappedFileSystemDirectoryElement'nameProto.Tensorflow.Core.Util.MemmappedFileSystem
                              _MemmappedFileSystemDirectoryElement'offsetProto.Tensorflow.Core.Util.MemmappedFileSystem
                              _MemoryInfo'availableProto.Tensorflow.Core.Util.TestLog
                              _MemoryInfo'totalProto.Tensorflow.Core.Util.TestLog
                              _MemoryLogRawAllocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'numBytesProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'operationProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'ptrProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'deferredProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'operationProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogStep'handleProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogStep'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorAllocation'kernelNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorAllocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorAllocation'tensorProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorDeallocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorDeallocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorOutput'indexProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorOutput'kernelNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorOutput'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorOutput'tensorProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryStats'devicePersistentMemorySizeProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'devicePersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'deviceTempMemorySizeProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'hostPersistentMemorySizeProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'hostPersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'hostTempMemorySizeProto.Tensorflow.Core.Framework.StepStats
                              _MetaGraphDef'assetFileDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'collectionDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'CollectionDefEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'CollectionDefEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'graphDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'metaGraphVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'tagsProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'tensorflowGitVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'tensorflowVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'signatureDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'SignatureDefEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'SignatureDefEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
                              _NamedTensorProto'nameProto.Tensorflow.Core.Protobuf.NamedTensor
                              _NamedTensorProto'tensorProto.Tensorflow.Core.Protobuf.NamedTensor
                              _NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
                              _NodeExecStats'allEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'allStartMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'memoryProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'memoryStatsProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'nodeNameProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'opEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'opStartRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'outputProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'referencedTensorProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'scheduledMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'threadIdProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'timelineLabelProto.Tensorflow.Core.Framework.StepStats
                              _NodeOutput'slotProto.Tensorflow.Core.Framework.StepStats
                              _NodeOutput'tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
                              _OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'attrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
                              _OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
                              _OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
                              _OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
                              _OpList'opProto.Tensorflow.Core.Framework.OpDef
                              _OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
                              _PlatformInfo'bitsProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'linkageProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'machineProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'releaseProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'systemProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'versionProto.Tensorflow.Core.Util.TestLog
                              _QueueRunnerDef'cancelOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              _QueueRunnerDef'closeOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              _QueueRunnerDef'enqueueOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              _QueueRunnerDef'queueClosedExceptionTypesProto.Tensorflow.Core.Protobuf.QueueRunner
                              _QueueRunnerDef'queueNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              _ResourceHandleProto'containerProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandleProto'deviceProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandleProto'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandleProto'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandleProto'nameProto.Tensorflow.Core.Framework.ResourceHandle
                              _RewriterConfig'autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'constantFoldingProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'disableModelPruningProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'memoryOptimizationProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'optimizersProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'optimizeTensorLayoutProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
                              _RunConfiguration'argumentProto.Tensorflow.Core.Util.TestLog
                              _RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
                              _SavedModel'metaGraphsProto.Tensorflow.Core.Protobuf.SavedModel
                              _SavedModel'savedModelSchemaVersionProto.Tensorflow.Core.Protobuf.SavedModel
                              _SavedSlice'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSlice'nameProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSlice'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSliceMeta'nameProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSliceMeta'shapeProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSliceMeta'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSliceMeta'type'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedTensorSliceMeta'tensorProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedTensorSliceMeta'versionsProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedTensorSlices'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedTensorSlices'metaProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SaverDef'filenameTensorNameProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'keepCheckpointEveryNHoursProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'maxToKeepProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'restoreOpNameProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'saveTensorNameProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'shardedProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'versionProto.Tensorflow.Core.Protobuf.Saver
                              _SaveSliceInfoDef'fullNameProto.Tensorflow.Core.Framework.Variable
                              _SaveSliceInfoDef'fullShapeProto.Tensorflow.Core.Framework.Variable
                              _SaveSliceInfoDef'varOffsetProto.Tensorflow.Core.Framework.Variable
                              _SaveSliceInfoDef'varShapeProto.Tensorflow.Core.Framework.Variable
                              _SequenceExample'contextProto.Tensorflow.Core.Example.Example
                              _SequenceExample'featureListsProto.Tensorflow.Core.Example.Example
                              _ServerDef'clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _ServerDef'defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _ServerDef'jobNameProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _ServerDef'protocolProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _ServerDef'taskIndexProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
                              _SessionLog'msgProto.Tensorflow.Core.Util.Event
                              _SessionLog'statusProto.Tensorflow.Core.Util.Event
                              _SignatureDef'inputsProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'InputsEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'InputsEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'methodNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'outputsProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'OutputsEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'OutputsEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _StepStats'devStatsProto.Tensorflow.Core.Framework.StepStats
                              _Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
                              _Summary'valueProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'metadataProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'valueProto.Tensorflow.Core.Framework.Summary
                              _SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'displayNameProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'pluginDataProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'PluginData'contentProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'PluginData'pluginNameProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'summaryDescriptionProto.Tensorflow.Core.Framework.Summary
                              _TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
                              _TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
                              _TensorDescription'allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
                              _TensorDescription'dtypeProto.Tensorflow.Core.Framework.TensorDescription
                              _TensorDescription'shapeProto.Tensorflow.Core.Framework.TensorDescription
                              _TensorInfo'CooSparse'denseShapeTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'CooSparse'indicesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'CooSparse'valuesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'dtypeProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'encodingProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'tensorShapeProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
                              _TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
                              _TensorSliceProto'extentProto.Tensorflow.Core.Framework.TensorSlice
                              _TensorSliceProto'Extent'hasLengthProto.Tensorflow.Core.Framework.TensorSlice
                              _TensorSliceProto'Extent'startProto.Tensorflow.Core.Framework.TensorSlice
                              _TestResults'benchmarkTypeProto.Tensorflow.Core.Util.TestLog
                              _TestResults'buildConfigurationProto.Tensorflow.Core.Util.TestLog
                              _TestResults'commitIdProto.Tensorflow.Core.Util.TestLog
                              _TestResults'entriesProto.Tensorflow.Core.Util.TestLog
                              _TestResults'machineConfigurationProto.Tensorflow.Core.Util.TestLog
                              _TestResults'nameProto.Tensorflow.Core.Util.TestLog
                              _TestResults'runConfigurationProto.Tensorflow.Core.Util.TestLog
                              _TestResults'runModeProto.Tensorflow.Core.Util.TestLog
                              _TestResults'runTimeProto.Tensorflow.Core.Util.TestLog
                              _TestResults'startTimeProto.Tensorflow.Core.Util.TestLog
                              _TestResults'targetProto.Tensorflow.Core.Util.TestLog
                              _ThreadPoolOptionProto'globalNameProto.Tensorflow.Core.Protobuf.Config
                              _ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ValuesDef'externalValuesProto.Tensorflow.Core.Protobuf.ControlFlow
                              _ValuesDef'ExternalValuesEntry'keyProto.Tensorflow.Core.Protobuf.ControlFlow
                              _ValuesDef'ExternalValuesEntry'valueProto.Tensorflow.Core.Protobuf.ControlFlow
                              _ValuesDef'valuesProto.Tensorflow.Core.Protobuf.ControlFlow
                              _VariableDef'initializerNameProto.Tensorflow.Core.Framework.Variable
                              _VariableDef'isResourceProto.Tensorflow.Core.Framework.Variable
                              _VariableDef'saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
                              _VariableDef'snapshotNameProto.Tensorflow.Core.Framework.Variable
                              _VariableDef'variableNameProto.Tensorflow.Core.Framework.Variable
                              _VarLenFeatureProto'dtypeProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _VarLenFeatureProto'indicesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _VarLenFeatureProto'shapesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _VarLenFeatureProto'valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _VersionDef'badConsumersProto.Tensorflow.Core.Framework.Versions
                              _VersionDef'minConsumerProto.Tensorflow.Core.Framework.Versions
                              _VersionDef'producerProto.Tensorflow.Core.Framework.Versions
                              _WhileContextDef'backPropProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'loopEnterNamesProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'loopExitNamesProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'parallelIterationsProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'pivotForBodyNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'pivotForPredNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'swapMemoryProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html index 2a1357e..096cbab 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - A)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - A

                              ABORTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              aliasInputPortProto.Tensorflow.Core.Framework.CostGraph
                              allEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              allocatedBytesProto.Tensorflow.Core.Framework.AllocationDescription
                              AllocationDescription 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AllocationDescription
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AllocationDescription
                              allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
                              allocationId 
                              1 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
                              2 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              allocatorBytesInUseProto.Tensorflow.Core.Framework.StepStats
                              AllocatorMemoryUsed 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              allocatorName 
                              1 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
                              2 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              3 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              allocatorTypeProto.Tensorflow.Core.Protobuf.Config
                              allowedValues 
                              1 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              allowGrowthProto.Tensorflow.Core.Protobuf.Config
                              allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
                              allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
                              allStartMicrosProto.Tensorflow.Core.Framework.StepStats
                              ALREADY_EXISTSProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              anyListProto.Tensorflow.Core.Protobuf.MetaGraph
                              argumentProto.Tensorflow.Core.Util.TestLog
                              AssetFileDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              assetFileDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              attr 
                              1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              3 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              4 (Function)Proto.Tensorflow.Core.Framework.Function
                              AttrValue 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'BProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'FProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'FuncProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'IProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ListProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ListValue 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'PlaceholderProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'SProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ShapeProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'TensorProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'TypeProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ValueProto.Tensorflow.Core.Framework.AttrValue
                              audioProto.Tensorflow.Core.Framework.Summary
                              autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
                              AutoParallelOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              availableProto.Tensorflow.Core.Util.TestLog
                              AvailableDeviceInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              availableDeviceInfoProto.Tensorflow.Core.Util.TestLog
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html index 8523ff5..36f60f7 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index

                              allocatorTypeProto.Tensorflow.Core.Protobuf.Config
                              allowedValuesProto.Tensorflow.Core.Framework.OpDef
                              allowGrowthProto.Tensorflow.Core.Protobuf.Config
                              allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
                              allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
                              attr 
                              1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              3 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              AttrValue 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ListValue 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              audioProto.Tensorflow.Core.Framework.Summary
                              bProto.Tensorflow.Core.Framework.AttrValue
                              boolValProto.Tensorflow.Core.Framework.Tensor
                              bucketProto.Tensorflow.Core.Framework.Summary
                              bucketLimitProto.Tensorflow.Core.Framework.Summary
                              buildCostModelProto.Tensorflow.Core.Protobuf.Config
                              buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
                              checkpointPathProto.Tensorflow.Core.Util.Event
                              colorspaceProto.Tensorflow.Core.Framework.Summary
                              ConfigProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              ConfigProto'DeviceCountEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              containerProto.Tensorflow.Core.Framework.ResourceHandle
                              contentTypeProto.Tensorflow.Core.Framework.Summary
                              costGraphProto.Tensorflow.Core.Protobuf.Config
                              DataTypeProto.Tensorflow.Core.Framework.Types
                              dcomplexValProto.Tensorflow.Core.Framework.Tensor
                              debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              defaultValueProto.Tensorflow.Core.Framework.OpDef
                              deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
                              deprecationProto.Tensorflow.Core.Framework.OpDef
                              descriptionProto.Tensorflow.Core.Framework.OpDef
                              device 
                              1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              deviceCountProto.Tensorflow.Core.Protobuf.Config
                              deviceFiltersProto.Tensorflow.Core.Protobuf.Config
                              dimProto.Tensorflow.Core.Framework.TensorShape
                              doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
                              doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
                              doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
                              doubleValProto.Tensorflow.Core.Framework.Tensor
                              dtypeProto.Tensorflow.Core.Framework.Tensor
                              DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
                              DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_BOOLProto.Tensorflow.Core.Framework.Types
                              DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
                              DT_DOUBLEProto.Tensorflow.Core.Framework.Types
                              DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
                              DT_FLOATProto.Tensorflow.Core.Framework.Types
                              DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
                              DT_HALFProto.Tensorflow.Core.Framework.Types
                              DT_HALF_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT16Proto.Tensorflow.Core.Framework.Types
                              DT_INT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT32Proto.Tensorflow.Core.Framework.Types
                              DT_INT32_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT64Proto.Tensorflow.Core.Framework.Types
                              DT_INT64_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT8Proto.Tensorflow.Core.Framework.Types
                              DT_INT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_INVALIDProto.Tensorflow.Core.Framework.Types
                              DT_QINT16Proto.Tensorflow.Core.Framework.Types
                              DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_QINT32Proto.Tensorflow.Core.Framework.Types
                              DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
                              DT_QINT8Proto.Tensorflow.Core.Framework.Types
                              DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_QUINT16Proto.Tensorflow.Core.Framework.Types
                              DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_QUINT8Proto.Tensorflow.Core.Framework.Types
                              DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_RESOURCEProto.Tensorflow.Core.Framework.Types
                              DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
                              DT_STRINGProto.Tensorflow.Core.Framework.Types
                              DT_STRING_REFProto.Tensorflow.Core.Framework.Types
                              DT_UINT16Proto.Tensorflow.Core.Framework.Types
                              DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_UINT8Proto.Tensorflow.Core.Framework.Types
                              DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
                              enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
                              enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
                              encodedAudioStringProto.Tensorflow.Core.Framework.Summary
                              encodedImageStringProto.Tensorflow.Core.Framework.Summary
                              Event 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              explanationProto.Tensorflow.Core.Framework.OpDef
                              fProto.Tensorflow.Core.Framework.AttrValue
                              fileVersionProto.Tensorflow.Core.Util.Event
                              floatValProto.Tensorflow.Core.Framework.Tensor
                              funcProto.Tensorflow.Core.Framework.AttrValue
                              globalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              GPUOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              GraphDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
                              graphDefProto.Tensorflow.Core.Util.Event
                              GraphOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              halfValProto.Tensorflow.Core.Framework.Tensor
                              hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
                              hasMinimumProto.Tensorflow.Core.Framework.OpDef
                              heightProto.Tensorflow.Core.Framework.Summary
                              histoProto.Tensorflow.Core.Framework.Summary
                              HistogramProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              iProto.Tensorflow.Core.Framework.AttrValue
                              imageProto.Tensorflow.Core.Framework.Summary
                              inferShapesProto.Tensorflow.Core.Protobuf.Config
                              inputProto.Tensorflow.Core.Framework.NodeDef
                              inputArgProto.Tensorflow.Core.Framework.OpDef
                              int64ValProto.Tensorflow.Core.Framework.Tensor
                              interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              intValProto.Tensorflow.Core.Framework.Tensor
                              isAggregateProto.Tensorflow.Core.Framework.OpDef
                              isCommutativeProto.Tensorflow.Core.Framework.OpDef
                              isRefProto.Tensorflow.Core.Framework.OpDef
                              isStatefulProto.Tensorflow.Core.Framework.OpDef
                              key 
                              1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              3 (Function)Proto.Tensorflow.Core.Protobuf.Config
                              lengthFramesProto.Tensorflow.Core.Framework.Summary
                              levelProto.Tensorflow.Core.Util.Event
                              libraryProto.Tensorflow.Core.Framework.Graph
                              listProto.Tensorflow.Core.Framework.AttrValue
                              logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
                              LogMessage 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              logMessageProto.Tensorflow.Core.Util.Event
                              LogMessage'DEBUGProto.Tensorflow.Core.Util.Event
                              LogMessage'ERRORProto.Tensorflow.Core.Util.Event
                              LogMessage'FATALProto.Tensorflow.Core.Util.Event
                              LogMessage'INFOProto.Tensorflow.Core.Util.Event
                              LogMessage'LevelProto.Tensorflow.Core.Util.Event
                              LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
                              LogMessage'WARNProto.Tensorflow.Core.Util.Event
                              maxProto.Tensorflow.Core.Framework.Summary
                              maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
                              maybe'audioProto.Tensorflow.Core.Framework.Summary
                              maybe'bProto.Tensorflow.Core.Framework.AttrValue
                              maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
                              maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
                              maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
                              maybe'fProto.Tensorflow.Core.Framework.AttrValue
                              maybe'fileVersionProto.Tensorflow.Core.Util.Event
                              maybe'funcProto.Tensorflow.Core.Framework.AttrValue
                              maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'graphDefProto.Tensorflow.Core.Util.Event
                              maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'histoProto.Tensorflow.Core.Framework.Summary
                              maybe'iProto.Tensorflow.Core.Framework.AttrValue
                              maybe'imageProto.Tensorflow.Core.Framework.Summary
                              maybe'libraryProto.Tensorflow.Core.Framework.Graph
                              maybe'listProto.Tensorflow.Core.Framework.AttrValue
                              maybe'logMessageProto.Tensorflow.Core.Util.Event
                              maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
                              maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
                              maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'sProto.Tensorflow.Core.Framework.AttrValue
                              maybe'sessionLogProto.Tensorflow.Core.Util.Event
                              maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
                              maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
                              maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
                              maybe'summaryProto.Tensorflow.Core.Util.Event
                              maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              maybe'tensor 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
                              maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
                              maybe'value 
                              1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              maybe'versionsProto.Tensorflow.Core.Framework.Graph
                              maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
                              messageProto.Tensorflow.Core.Util.Event
                              metaGraphDefProto.Tensorflow.Core.Util.Event
                              minProto.Tensorflow.Core.Framework.Summary
                              minimumProto.Tensorflow.Core.Framework.OpDef
                              msgProto.Tensorflow.Core.Util.Event
                              name 
                              1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
                              2 (Function)Proto.Tensorflow.Core.Framework.TensorShape
                              3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              4 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              5 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              NameAttrList 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              NameAttrList'AttrEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              nodeProto.Tensorflow.Core.Framework.Graph
                              NodeDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
                              NodeDef'AttrEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
                              nodeNameProto.Tensorflow.Core.Framework.Summary
                              numProto.Tensorflow.Core.Framework.Summary
                              numberAttrProto.Tensorflow.Core.Framework.OpDef
                              numChannelsProto.Tensorflow.Core.Framework.Summary
                              numThreadsProto.Tensorflow.Core.Protobuf.Config
                              obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              op 
                              1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef'ArgDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef'AttrDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDeprecation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              OpList 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OptimizerOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
                              optLevelProto.Tensorflow.Core.Protobuf.Config
                              outputArgProto.Tensorflow.Core.Framework.OpDef
                              outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              partitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
                              placeholderProto.Tensorflow.Core.Framework.AttrValue
                              placementPeriodProto.Tensorflow.Core.Protobuf.Config
                              placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
                              ResourceHandle 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
                              resourceHandleValProto.Tensorflow.Core.Framework.Tensor
                              RPCOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              RunMetadata 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              runMetadataProto.Tensorflow.Core.Util.Event
                              RunOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
                              sProto.Tensorflow.Core.Framework.AttrValue
                              sampleRateProto.Tensorflow.Core.Framework.Summary
                              scomplexValProto.Tensorflow.Core.Framework.Tensor
                              sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              SessionLog 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              sessionLogProto.Tensorflow.Core.Util.Event
                              SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
                              SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
                              SessionLog'STARTProto.Tensorflow.Core.Util.Event
                              SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
                              SessionLog'STOPProto.Tensorflow.Core.Util.Event
                              shapeProto.Tensorflow.Core.Framework.AttrValue
                              simpleValueProto.Tensorflow.Core.Framework.Summary
                              sizeProto.Tensorflow.Core.Framework.TensorShape
                              statusProto.Tensorflow.Core.Util.Event
                              stepProto.Tensorflow.Core.Util.Event
                              stepStatsProto.Tensorflow.Core.Protobuf.Config
                              stringValProto.Tensorflow.Core.Framework.Tensor
                              sumProto.Tensorflow.Core.Framework.Summary
                              Summary 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              summary 
                              1 (Function)Proto.Tensorflow.Core.Util.Event
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              Summary'Audio 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Image 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Value 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              SummaryDescription 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              sumSquaresProto.Tensorflow.Core.Framework.Summary
                              tag 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Util.Event
                              TaggedRunMetadata 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              tensor 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              tensorContentProto.Tensorflow.Core.Framework.Tensor
                              TensorProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
                              tensorShapeProto.Tensorflow.Core.Framework.Tensor
                              TensorShapeProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
                              TensorShapeProto'Dim 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
                              ThreadPoolOptionProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              timelineStepProto.Tensorflow.Core.Protobuf.Config
                              timeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              traceLevelProto.Tensorflow.Core.Protobuf.Config
                              type' 
                              1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              typeAttrProto.Tensorflow.Core.Framework.OpDef
                              typeHintProto.Tensorflow.Core.Framework.Summary
                              typeListAttrProto.Tensorflow.Core.Framework.OpDef
                              unknownRankProto.Tensorflow.Core.Framework.TensorShape
                              usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
                              useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
                              value 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              4 (Function)Proto.Tensorflow.Core.Protobuf.Config
                              version 
                              1 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Function)Proto.Tensorflow.Core.Framework.Graph
                              versionNumberProto.Tensorflow.Core.Framework.Tensor
                              versionsProto.Tensorflow.Core.Framework.Graph
                              visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
                              wallTimeProto.Tensorflow.Core.Util.Event
                              widthProto.Tensorflow.Core.Framework.Summary
                              _AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
                              _ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
                              _Event'fileVersionProto.Tensorflow.Core.Util.Event
                              _Event'graphDefProto.Tensorflow.Core.Util.Event
                              _Event'logMessageProto.Tensorflow.Core.Util.Event
                              _Event'metaGraphDefProto.Tensorflow.Core.Util.Event
                              _Event'sessionLogProto.Tensorflow.Core.Util.Event
                              _Event'stepProto.Tensorflow.Core.Util.Event
                              _Event'summaryProto.Tensorflow.Core.Util.Event
                              _Event'taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              _Event'wallTimeProto.Tensorflow.Core.Util.Event
                              _GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
                              _GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'versionProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
                              _GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
                              _HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'minProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'numProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
                              _LogMessage'levelProto.Tensorflow.Core.Util.Event
                              _LogMessage'messageProto.Tensorflow.Core.Util.Event
                              _NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
                              _NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
                              _OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'attrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
                              _OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
                              _OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
                              _OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
                              _OpList'opProto.Tensorflow.Core.Framework.OpDef
                              _OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
                              _ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
                              _RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
                              _SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
                              _SessionLog'msgProto.Tensorflow.Core.Util.Event
                              _SessionLog'statusProto.Tensorflow.Core.Util.Event
                              _Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
                              _Summary'valueProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'audioProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'histoProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'imageProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'simpleValueProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'tensorProto.Tensorflow.Core.Framework.Summary
                              _SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
                              _TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
                              _TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
                              _TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
                              _TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
                              _ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index

                              ABORTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              aliasInputPortProto.Tensorflow.Core.Framework.CostGraph
                              allEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              allocatedBytesProto.Tensorflow.Core.Framework.AllocationDescription
                              AllocationDescription 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AllocationDescription
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AllocationDescription
                              allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
                              allocationId 
                              1 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
                              2 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              allocatorBytesInUseProto.Tensorflow.Core.Framework.StepStats
                              AllocatorMemoryUsed 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              allocatorName 
                              1 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
                              2 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              3 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              allocatorTypeProto.Tensorflow.Core.Protobuf.Config
                              allowedValues 
                              1 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              allowGrowthProto.Tensorflow.Core.Protobuf.Config
                              allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
                              allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
                              allStartMicrosProto.Tensorflow.Core.Framework.StepStats
                              ALREADY_EXISTSProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              anyListProto.Tensorflow.Core.Protobuf.MetaGraph
                              argumentProto.Tensorflow.Core.Util.TestLog
                              AssetFileDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              assetFileDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              attr 
                              1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              3 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              4 (Function)Proto.Tensorflow.Core.Framework.Function
                              AttrValue 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'BProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'FProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'FuncProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'IProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ListProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ListValue 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'PlaceholderProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'SProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ShapeProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'TensorProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'TypeProto.Tensorflow.Core.Framework.AttrValue
                              AttrValue'ValueProto.Tensorflow.Core.Framework.AttrValue
                              audioProto.Tensorflow.Core.Framework.Summary
                              autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
                              AutoParallelOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              availableProto.Tensorflow.Core.Util.TestLog
                              AvailableDeviceInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              availableDeviceInfoProto.Tensorflow.Core.Util.TestLog
                              bProto.Tensorflow.Core.Framework.AttrValue
                              backPropProto.Tensorflow.Core.Protobuf.ControlFlow
                              badConsumersProto.Tensorflow.Core.Framework.Versions
                              BenchmarkEntries 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              BenchmarkEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              BenchmarkEntry'ExtrasEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              benchmarkTypeProto.Tensorflow.Core.Util.TestLog
                              bitsProto.Tensorflow.Core.Util.TestLog
                              boolValProto.Tensorflow.Core.Framework.Tensor
                              branchProto.Tensorflow.Core.Protobuf.ControlFlow
                              bucketProto.Tensorflow.Core.Framework.Summary
                              bucketLimitProto.Tensorflow.Core.Framework.Summary
                              BuildConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              buildConfigurationProto.Tensorflow.Core.Util.TestLog
                              buildCostModelProto.Tensorflow.Core.Protobuf.Config
                              buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
                              BundleEntryProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              BundleHeaderProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              BundleHeaderProto'BIGProto.Tensorflow.Core.Protobuf.TensorBundle
                              BundleHeaderProto'EndiannessProto.Tensorflow.Core.Protobuf.TensorBundle
                              BundleHeaderProto'LITTLEProto.Tensorflow.Core.Protobuf.TensorBundle
                              busId 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              BytesList 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              bytesList 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              cacheSizeProto.Tensorflow.Core.Util.TestLog
                              CANCELLEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              cancelOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              ccFlagsProto.Tensorflow.Core.Util.TestLog
                              changelistProto.Tensorflow.Core.Util.TestLog
                              checkpointPathProto.Tensorflow.Core.Util.Event
                              closeOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
                              ClusterDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Cluster
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Cluster
                              clusterDefProto.Tensorflow.Core.Protobuf.Config
                              CodeProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              CollectionDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              collectionDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'AnyList 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'AnyList'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'BytesList 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'BytesList'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'FloatList 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'FloatList'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'Int64List 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'Int64List'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'KindProto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'NodeList 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'NodeList'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              colorspaceProto.Tensorflow.Core.Framework.Summary
                              CommitId 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              commitIdProto.Tensorflow.Core.Util.TestLog
                              CommitId'ChangelistProto.Tensorflow.Core.Util.TestLog
                              CommitId'HashProto.Tensorflow.Core.Util.TestLog
                              CommitId'KindProto.Tensorflow.Core.Util.TestLog
                              computeCostProto.Tensorflow.Core.Framework.CostGraph
                              computeTimeProto.Tensorflow.Core.Framework.CostGraph
                              CondContextDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              ConfigProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              ConfigProto'DeviceCountEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              constantFoldingProto.Tensorflow.Core.Protobuf.RewriterConfig
                              constraintProto.Tensorflow.Core.Framework.KernelDef
                              containerProto.Tensorflow.Core.Framework.ResourceHandle
                              contentProto.Tensorflow.Core.Framework.Summary
                              contentTypeProto.Tensorflow.Core.Framework.Summary
                              contextProto.Tensorflow.Core.Example.Example
                              contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              controlInputProto.Tensorflow.Core.Framework.CostGraph
                              cooSparseProto.Tensorflow.Core.Protobuf.MetaGraph
                              costGraphProto.Tensorflow.Core.Protobuf.Config
                              CostGraphDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
                              CostGraphDef'Node 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
                              CostGraphDef'Node'InputInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
                              CostGraphDef'Node'OutputInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
                              cpuGovernorProto.Tensorflow.Core.Util.TestLog
                              CPUInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              cpuInfoProto.Tensorflow.Core.Util.TestLog
                              CPUInfo'CacheSizeEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              cpuTimeProto.Tensorflow.Core.Util.TestLog
                              crc32cProto.Tensorflow.Core.Protobuf.TensorBundle
                              data'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              DataTypeProto.Tensorflow.Core.Framework.Types
                              DATA_LOSSProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              dcomplexValProto.Tensorflow.Core.Framework.Tensor
                              DEADLINE_EXCEEDEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              debugOpsProto.Tensorflow.Core.Protobuf.Debug
                              DebugOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Debug
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Debug
                              debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              DebugTensorWatch 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Debug
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Debug
                              debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Debug
                              debugUrlsProto.Tensorflow.Core.Protobuf.Debug
                              defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
                              defaultValue 
                              1 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              deferredProto.Tensorflow.Core.Framework.LogMemory
                              deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
                              denseShapeTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              deprecationProto.Tensorflow.Core.Framework.OpDef
                              descriptionProto.Tensorflow.Core.Framework.OpDef
                              device 
                              1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
                              2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              4 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              DeviceAttributes 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              deviceCountProto.Tensorflow.Core.Protobuf.Config
                              deviceFiltersProto.Tensorflow.Core.Protobuf.Config
                              deviceInfoProto.Tensorflow.Core.Util.TestLog
                              DeviceLocality 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              devicePersistentMemorySize 
                              1 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              devicePersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
                              DeviceStepStats 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              deviceTempMemorySize 
                              1 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              deviceType 
                              1 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              2 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              devStatsProto.Tensorflow.Core.Framework.StepStats
                              dimProto.Tensorflow.Core.Framework.TensorShape
                              disableModelPruningProto.Tensorflow.Core.Protobuf.RewriterConfig
                              displayNameProto.Tensorflow.Core.Framework.Summary
                              doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
                              doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
                              doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
                              doubleValProto.Tensorflow.Core.Framework.Tensor
                              doubleValueProto.Tensorflow.Core.Util.TestLog
                              DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_Proto.Tensorflow.Core.Lib.Core.ErrorCodes
                              dtype 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Function)Proto.Tensorflow.Core.Framework.Tensor
                              3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              4 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              5 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
                              6 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
                              DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_BOOLProto.Tensorflow.Core.Framework.Types
                              DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
                              DT_DOUBLEProto.Tensorflow.Core.Framework.Types
                              DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
                              DT_FLOATProto.Tensorflow.Core.Framework.Types
                              DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
                              DT_HALFProto.Tensorflow.Core.Framework.Types
                              DT_HALF_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT16Proto.Tensorflow.Core.Framework.Types
                              DT_INT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT32Proto.Tensorflow.Core.Framework.Types
                              DT_INT32_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT64Proto.Tensorflow.Core.Framework.Types
                              DT_INT64_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT8Proto.Tensorflow.Core.Framework.Types
                              DT_INT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_INVALIDProto.Tensorflow.Core.Framework.Types
                              DT_QINT16Proto.Tensorflow.Core.Framework.Types
                              DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_QINT32Proto.Tensorflow.Core.Framework.Types
                              DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
                              DT_QINT8Proto.Tensorflow.Core.Framework.Types
                              DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_QUINT16Proto.Tensorflow.Core.Framework.Types
                              DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_QUINT8Proto.Tensorflow.Core.Framework.Types
                              DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_RESOURCEProto.Tensorflow.Core.Framework.Types
                              DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
                              DT_STRINGProto.Tensorflow.Core.Framework.Types
                              DT_STRING_REFProto.Tensorflow.Core.Framework.Types
                              DT_UINT16Proto.Tensorflow.Core.Framework.Types
                              DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_UINT8Proto.Tensorflow.Core.Framework.Types
                              DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
                              elementProto.Tensorflow.Core.Util.MemmappedFileSystem
                              enableProto.Tensorflow.Core.Protobuf.RewriterConfig
                              enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
                              enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
                              encodedAudioStringProto.Tensorflow.Core.Framework.Summary
                              encodedImageStringProto.Tensorflow.Core.Framework.Summary
                              endiannessProto.Tensorflow.Core.Protobuf.TensorBundle
                              enqueueOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              entriesProto.Tensorflow.Core.Util.TestLog
                              entryProto.Tensorflow.Core.Util.TestLog
                              EntryValue 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              EntryValue'DoubleValueProto.Tensorflow.Core.Util.TestLog
                              EntryValue'KindProto.Tensorflow.Core.Util.TestLog
                              EntryValue'StringValueProto.Tensorflow.Core.Util.TestLog
                              Event 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              Event'FileVersionProto.Tensorflow.Core.Util.Event
                              Event'GraphDefProto.Tensorflow.Core.Util.Event
                              Event'LogMessageProto.Tensorflow.Core.Util.Event
                              Event'MetaGraphDefProto.Tensorflow.Core.Util.Event
                              Event'SessionLogProto.Tensorflow.Core.Util.Event
                              Event'SummaryProto.Tensorflow.Core.Util.Event
                              Event'TaggedRunMetadataProto.Tensorflow.Core.Util.Event
                              Event'WhatProto.Tensorflow.Core.Util.Event
                              Example 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Example
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Example
                              ExampleParserConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              ExampleParserConfiguration'FeatureMapEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              explanationProto.Tensorflow.Core.Framework.OpDef
                              extentProto.Tensorflow.Core.Framework.TensorSlice
                              externalValuesProto.Tensorflow.Core.Protobuf.ControlFlow
                              extrasProto.Tensorflow.Core.Util.TestLog
                              fProto.Tensorflow.Core.Framework.AttrValue
                              FAILED_PRECONDITIONProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              Feature 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featureProto.Tensorflow.Core.Example.Feature
                              Feature'BytesListProto.Tensorflow.Core.Example.Feature
                              Feature'FloatListProto.Tensorflow.Core.Example.Feature
                              Feature'Int64ListProto.Tensorflow.Core.Example.Feature
                              Feature'KindProto.Tensorflow.Core.Example.Feature
                              FeatureConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FeatureConfiguration'ConfigProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FeatureConfiguration'FixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FeatureConfiguration'VarLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FeatureList 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featureListProto.Tensorflow.Core.Example.Feature
                              FeatureLists 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featureListsProto.Tensorflow.Core.Example.Example
                              FeatureLists'FeatureListEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featureMapProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              Features 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featuresProto.Tensorflow.Core.Example.Example
                              Features'FeatureEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              filenameProto.Tensorflow.Core.Protobuf.MetaGraph
                              filenameTensorNameProto.Tensorflow.Core.Protobuf.Saver
                              fileVersionProto.Tensorflow.Core.Util.Event
                              fixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FixedLenFeatureProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FloatList 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              floatList 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              floatValProto.Tensorflow.Core.Framework.Tensor
                              forceGpuCompatibleProto.Tensorflow.Core.Protobuf.Config
                              fullNameProto.Tensorflow.Core.Framework.Variable
                              fullShapeProto.Tensorflow.Core.Framework.Variable
                              funcProto.Tensorflow.Core.Framework.AttrValue
                              functionProto.Tensorflow.Core.Framework.Function
                              FunctionDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              FunctionDef'AttrEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              FunctionDef'RetEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              FunctionDefLibrary 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              functionNameProto.Tensorflow.Core.Framework.Function
                              globalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              globalNameProto.Tensorflow.Core.Protobuf.Config
                              globalStepProto.Tensorflow.Core.Protobuf.Debug
                              GPUInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              GPUOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              gradientProto.Tensorflow.Core.Framework.Function
                              GradientDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              gradientFuncProto.Tensorflow.Core.Framework.Function
                              GraphDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
                              graphDef 
                              1 (Function)Proto.Tensorflow.Core.Util.Event
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              GraphOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              halfValProto.Tensorflow.Core.Framework.Tensor
                              handleProto.Tensorflow.Core.Framework.LogMemory
                              hashProto.Tensorflow.Core.Util.TestLog
                              hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
                              hasMinimumProto.Tensorflow.Core.Framework.OpDef
                              hasSingleReferenceProto.Tensorflow.Core.Framework.AllocationDescription
                              heightProto.Tensorflow.Core.Framework.Summary
                              histoProto.Tensorflow.Core.Framework.Summary
                              HistogramProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              hostMemoryArgProto.Tensorflow.Core.Framework.KernelDef
                              hostnameProto.Tensorflow.Core.Util.TestLog
                              hostPersistentMemorySize 
                              1 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              hostPersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
                              hostTempMemorySize 
                              1 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              iProto.Tensorflow.Core.Framework.AttrValue
                              idProto.Tensorflow.Core.Framework.CostGraph
                              imageProto.Tensorflow.Core.Framework.Summary
                              incarnationProto.Tensorflow.Core.Framework.DeviceAttributes
                              indexProto.Tensorflow.Core.Framework.LogMemory
                              indicesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              indicesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              inferShapesProto.Tensorflow.Core.Protobuf.Config
                              initializerNameProto.Tensorflow.Core.Framework.Variable
                              inputProto.Tensorflow.Core.Framework.NodeDef
                              inputArgProto.Tensorflow.Core.Framework.OpDef
                              inputInfoProto.Tensorflow.Core.Framework.CostGraph
                              inputsProto.Tensorflow.Core.Protobuf.MetaGraph
                              Int64List 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              int64List 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              int64ValProto.Tensorflow.Core.Framework.Tensor
                              INTERNALProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              intValProto.Tensorflow.Core.Framework.Tensor
                              INVALID_ARGUMENTProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              isAggregateProto.Tensorflow.Core.Framework.OpDef
                              isCommutativeProto.Tensorflow.Core.Framework.OpDef
                              isFinalProto.Tensorflow.Core.Framework.CostGraph
                              isRefProto.Tensorflow.Core.Framework.OpDef
                              isResourceProto.Tensorflow.Core.Framework.Variable
                              isStatefulProto.Tensorflow.Core.Framework.OpDef
                              itersProto.Tensorflow.Core.Util.TestLog
                              jobProto.Tensorflow.Core.Protobuf.Cluster
                              JobDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Cluster
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Cluster
                              JobDef'TasksEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Cluster
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Cluster
                              jobNameProto.Tensorflow.Core.Protobuf.TensorflowServer
                              keepCheckpointEveryNHoursProto.Tensorflow.Core.Protobuf.Saver
                              KernelDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.KernelDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.KernelDef
                              KernelDef'AttrConstraint 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.KernelDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.KernelDef
                              kernelNameProto.Tensorflow.Core.Framework.LogMemory
                              key 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              3 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
                              4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              5 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              6 (Function)Proto.Tensorflow.Core.Framework.Function
                              7 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              8 (Function)Proto.Tensorflow.Core.Protobuf.Config
                              9 (Function)Proto.Tensorflow.Core.Example.Feature
                              10 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              labelProto.Tensorflow.Core.Framework.KernelDef
                              lengthProto.Tensorflow.Core.Framework.TensorSlice
                              lengthFramesProto.Tensorflow.Core.Framework.Summary
                              levelProto.Tensorflow.Core.Util.Event
                              libraryProto.Tensorflow.Core.Framework.Graph
                              linkageProto.Tensorflow.Core.Util.TestLog
                              listProto.Tensorflow.Core.Framework.AttrValue
                              liveBytesProto.Tensorflow.Core.Framework.StepStats
                              localityProto.Tensorflow.Core.Framework.DeviceAttributes
                              logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
                              LogMessage 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              logMessageProto.Tensorflow.Core.Util.Event
                              LogMessage'DEBUGGINGProto.Tensorflow.Core.Util.Event
                              LogMessage'ERRORProto.Tensorflow.Core.Util.Event
                              LogMessage'FATALProto.Tensorflow.Core.Util.Event
                              LogMessage'INFOProto.Tensorflow.Core.Util.Event
                              LogMessage'LevelProto.Tensorflow.Core.Util.Event
                              LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
                              LogMessage'WARNProto.Tensorflow.Core.Util.Event
                              loopEnterNamesProto.Tensorflow.Core.Protobuf.ControlFlow
                              loopExitNamesProto.Tensorflow.Core.Protobuf.ControlFlow
                              machineProto.Tensorflow.Core.Util.TestLog
                              MachineConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              machineConfigurationProto.Tensorflow.Core.Util.TestLog
                              maxProto.Tensorflow.Core.Framework.Summary
                              maxToKeepProto.Tensorflow.Core.Protobuf.Saver
                              maybe'allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
                              maybe'allowedValues 
                              1 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              maybe'anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'anyListProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'audioProto.Tensorflow.Core.Framework.Summary
                              maybe'autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
                              maybe'bProto.Tensorflow.Core.Framework.AttrValue
                              maybe'buildConfigurationProto.Tensorflow.Core.Util.TestLog
                              maybe'bytesList 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              maybe'changelistProto.Tensorflow.Core.Util.TestLog
                              maybe'clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
                              maybe'clusterDefProto.Tensorflow.Core.Protobuf.Config
                              maybe'commitIdProto.Tensorflow.Core.Util.TestLog
                              maybe'configProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'contextProto.Tensorflow.Core.Example.Example
                              maybe'cooSparseProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
                              maybe'cpuInfoProto.Tensorflow.Core.Util.TestLog
                              maybe'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
                              maybe'defaultValue 
                              1 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
                              maybe'doubleValueProto.Tensorflow.Core.Util.TestLog
                              maybe'encodingProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'entriesProto.Tensorflow.Core.Util.TestLog
                              maybe'fProto.Tensorflow.Core.Framework.AttrValue
                              maybe'featureListsProto.Tensorflow.Core.Example.Example
                              maybe'featuresProto.Tensorflow.Core.Example.Example
                              maybe'fileVersionProto.Tensorflow.Core.Util.Event
                              maybe'fixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'floatList 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              maybe'funcProto.Tensorflow.Core.Framework.AttrValue
                              maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'graphDef 
                              1 (Function)Proto.Tensorflow.Core.Util.Event
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'hashProto.Tensorflow.Core.Util.TestLog
                              maybe'hasLengthProto.Tensorflow.Core.Framework.TensorSlice
                              maybe'histoProto.Tensorflow.Core.Framework.Summary
                              maybe'iProto.Tensorflow.Core.Framework.AttrValue
                              maybe'imageProto.Tensorflow.Core.Framework.Summary
                              maybe'int64List 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              maybe'kind 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              3 (Function)Proto.Tensorflow.Core.Example.Feature
                              maybe'lengthProto.Tensorflow.Core.Framework.TensorSlice
                              maybe'libraryProto.Tensorflow.Core.Framework.Graph
                              maybe'listProto.Tensorflow.Core.Framework.AttrValue
                              maybe'localityProto.Tensorflow.Core.Framework.DeviceAttributes
                              maybe'logMessageProto.Tensorflow.Core.Util.Event
                              maybe'machineConfigurationProto.Tensorflow.Core.Util.TestLog
                              maybe'memoryInfoProto.Tensorflow.Core.Util.TestLog
                              maybe'memoryStatsProto.Tensorflow.Core.Framework.StepStats
                              maybe'metaProto.Tensorflow.Core.Util.SavedTensorSlice
                              maybe'metadataProto.Tensorflow.Core.Framework.Summary
                              maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
                              maybe'metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'nameProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'nodeListProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
                              maybe'platformInfoProto.Tensorflow.Core.Util.TestLog
                              maybe'pluginDataProto.Tensorflow.Core.Framework.Summary
                              maybe'rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'runConfigurationProto.Tensorflow.Core.Util.TestLog
                              maybe'sProto.Tensorflow.Core.Framework.AttrValue
                              maybe'saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
                              maybe'sessionLogProto.Tensorflow.Core.Util.Event
                              maybe'shape 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              5 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
                              6 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'signatureProto.Tensorflow.Core.Framework.Function
                              maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
                              maybe'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
                              maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
                              maybe'stringValueProto.Tensorflow.Core.Util.TestLog
                              maybe'strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'summaryProto.Tensorflow.Core.Util.Event
                              maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              maybe'tensor 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              4 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              maybe'tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
                              maybe'tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'tensorShape 
                              1 (Function)Proto.Tensorflow.Core.Framework.Tensor
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
                              maybe'value 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Framework.Summary
                              3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              4 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              5 (Function)Proto.Tensorflow.Core.Framework.Function
                              6 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              7 (Function)Proto.Tensorflow.Core.Example.Feature
                              8 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
                              maybe'varLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'versionProto.Tensorflow.Core.Protobuf.TensorBundle
                              maybe'versions 
                              1 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Function)Proto.Tensorflow.Core.Framework.Graph
                              maybe'whatProto.Tensorflow.Core.Util.Event
                              maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
                              MemmappedFileSystemDirectory 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              MemmappedFileSystemDirectoryElement 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              memoryProto.Tensorflow.Core.Framework.StepStats
                              MemoryInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              memoryInfoProto.Tensorflow.Core.Util.TestLog
                              memoryLimit 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              MemoryLogRawAllocation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogRawDeallocation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogStep 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogTensorAllocation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogTensorDeallocation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogTensorOutput 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              memoryOptimizationProto.Tensorflow.Core.Protobuf.RewriterConfig
                              MemoryStats 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              memoryStatsProto.Tensorflow.Core.Framework.StepStats
                              memoryTimeProto.Tensorflow.Core.Framework.CostGraph
                              messageProto.Tensorflow.Core.Util.Event
                              metaProto.Tensorflow.Core.Util.SavedTensorSlice
                              metadataProto.Tensorflow.Core.Framework.Summary
                              MetaGraphDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              metaGraphDefProto.Tensorflow.Core.Util.Event
                              MetaGraphDef'CollectionDefEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              MetaGraphDef'MetaInfoDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              MetaGraphDef'SignatureDefEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              metaGraphsProto.Tensorflow.Core.Protobuf.SavedModel
                              metaGraphVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              methodNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              mhzPerCpuProto.Tensorflow.Core.Util.TestLog
                              minProto.Tensorflow.Core.Framework.Summary
                              minConsumerProto.Tensorflow.Core.Framework.Versions
                              minimumProto.Tensorflow.Core.Framework.OpDef
                              modeProto.Tensorflow.Core.Util.TestLog
                              modelProto.Tensorflow.Core.Util.TestLog
                              msgProto.Tensorflow.Core.Util.Event
                              name 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              3 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
                              4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
                              5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
                              6 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              7 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              8 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              9 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              10 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              11 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              12 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              13 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              14 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              NameAttrList 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              NameAttrList'AttrEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              NamedTensorProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              node 
                              1 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Function)Proto.Tensorflow.Core.Framework.Graph
                              NodeDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
                              nodeDefProto.Tensorflow.Core.Framework.Function
                              NodeDef'AttrEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
                              NodeExecStats 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              nodeListProto.Tensorflow.Core.Protobuf.MetaGraph
                              nodeName 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.Debug
                              2 (Function)Proto.Tensorflow.Core.Framework.Summary
                              3 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              NodeOutput 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              nodeStatsProto.Tensorflow.Core.Framework.StepStats
                              NOT_FOUNDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              numProto.Tensorflow.Core.Framework.Summary
                              numberAttrProto.Tensorflow.Core.Framework.OpDef
                              numBytesProto.Tensorflow.Core.Framework.LogMemory
                              numChannelsProto.Tensorflow.Core.Framework.Summary
                              numCoresProto.Tensorflow.Core.Util.TestLog
                              numCoresAllowedProto.Tensorflow.Core.Util.TestLog
                              numReplicasProto.Tensorflow.Core.Protobuf.RewriterConfig
                              numShardsProto.Tensorflow.Core.Protobuf.TensorBundle
                              numThreadsProto.Tensorflow.Core.Protobuf.Config
                              obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              offset 
                              1 (Function)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              2 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              OKProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              op 
                              1 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              3 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef'ArgDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef'AttrDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDeprecation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              opEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              operationProto.Tensorflow.Core.Framework.LogMemory
                              operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              OpList 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              opStartRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              OptimizerOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
                              optimizersProto.Tensorflow.Core.Protobuf.RewriterConfig
                              optimizeTensorLayoutProto.Tensorflow.Core.Protobuf.RewriterConfig
                              optLevelProto.Tensorflow.Core.Protobuf.Config
                              optsProto.Tensorflow.Core.Util.TestLog
                              outputProto.Tensorflow.Core.Framework.StepStats
                              outputArgProto.Tensorflow.Core.Framework.OpDef
                              outputInfoProto.Tensorflow.Core.Framework.CostGraph
                              outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              outputsProto.Tensorflow.Core.Protobuf.MetaGraph
                              outputSlotProto.Tensorflow.Core.Protobuf.Debug
                              OUT_OF_RANGEProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              parallelIterationsProto.Tensorflow.Core.Protobuf.ControlFlow
                              partitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              peakBytesProto.Tensorflow.Core.Framework.StepStats
                              PERMISSION_DENIEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
                              physicalDescriptionProto.Tensorflow.Core.Util.TestLog
                              physicalDeviceDescProto.Tensorflow.Core.Framework.DeviceAttributes
                              pivotForBodyNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              pivotForPredNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              placeholderProto.Tensorflow.Core.Framework.AttrValue
                              placementPeriodProto.Tensorflow.Core.Protobuf.Config
                              placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
                              PlatformInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              platformInfoProto.Tensorflow.Core.Util.TestLog
                              pluginDataProto.Tensorflow.Core.Framework.Summary
                              pluginNameProto.Tensorflow.Core.Framework.Summary
                              pollingActiveDelayUsecsProto.Tensorflow.Core.Protobuf.Config
                              pollingInactiveDelayMsecsProto.Tensorflow.Core.Protobuf.Config
                              precedingNodeProto.Tensorflow.Core.Framework.CostGraph
                              precedingPortProto.Tensorflow.Core.Framework.CostGraph
                              predNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              producerProto.Tensorflow.Core.Framework.Versions
                              protocolProto.Tensorflow.Core.Protobuf.TensorflowServer
                              ptr 
                              1 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
                              2 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              queueClosedExceptionTypesProto.Tensorflow.Core.Protobuf.QueueRunner
                              queueNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              QueueRunnerDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.QueueRunner
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.QueueRunner
                              referencedTensorProto.Tensorflow.Core.Framework.StepStats
                              releaseProto.Tensorflow.Core.Util.TestLog
                              requestedBytesProto.Tensorflow.Core.Framework.AllocationDescription
                              ResourceHandleProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
                              resourceHandleValProto.Tensorflow.Core.Framework.Tensor
                              RESOURCE_EXHAUSTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              restoreOpNameProto.Tensorflow.Core.Protobuf.Saver
                              retProto.Tensorflow.Core.Framework.Function
                              rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
                              RewriterConfig 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              RewriterConfig'HEURISTICSProto.Tensorflow.Core.Protobuf.RewriterConfig
                              RewriterConfig'MANUALProto.Tensorflow.Core.Protobuf.RewriterConfig
                              RewriterConfig'MemOptTypeProto.Tensorflow.Core.Protobuf.RewriterConfig
                              RewriterConfig'NO_MEM_OPTProto.Tensorflow.Core.Protobuf.RewriterConfig
                              RPCOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              RunConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              runConfigurationProto.Tensorflow.Core.Util.TestLog
                              RunMetadata 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              runMetadataProto.Tensorflow.Core.Util.Event
                              runModeProto.Tensorflow.Core.Util.TestLog
                              RunOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
                              runTimeProto.Tensorflow.Core.Util.TestLog
                              sProto.Tensorflow.Core.Framework.AttrValue
                              sampleRateProto.Tensorflow.Core.Framework.Summary
                              SavedModel 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.SavedModel
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.SavedModel
                              savedModelSchemaVersionProto.Tensorflow.Core.Protobuf.SavedModel
                              SavedSlice 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              SavedSliceMeta 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              SavedTensorSliceMeta 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              SavedTensorSlices 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              SaverDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Saver
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Saver
                              saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              SaverDef'CheckpointFormatVersionProto.Tensorflow.Core.Protobuf.Saver
                              SaverDef'LEGACYProto.Tensorflow.Core.Protobuf.Saver
                              SaverDef'V1Proto.Tensorflow.Core.Protobuf.Saver
                              SaverDef'V2Proto.Tensorflow.Core.Protobuf.Saver
                              SaveSliceInfoDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Variable
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Variable
                              saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
                              saveTensorNameProto.Tensorflow.Core.Protobuf.Saver
                              scheduledMicrosProto.Tensorflow.Core.Framework.StepStats
                              scomplexValProto.Tensorflow.Core.Framework.Tensor
                              SequenceExample 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Example
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Example
                              serialIdentifierProto.Tensorflow.Core.Util.TestLog
                              ServerDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorflowServer
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorflowServer
                              sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              SessionLog 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              sessionLogProto.Tensorflow.Core.Util.Event
                              SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
                              SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
                              SessionLog'STARTProto.Tensorflow.Core.Util.Event
                              SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
                              SessionLog'STOPProto.Tensorflow.Core.Util.Event
                              shape 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              5 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
                              6 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              shapesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              shardedProto.Tensorflow.Core.Protobuf.Saver
                              shardIdProto.Tensorflow.Core.Protobuf.TensorBundle
                              signatureProto.Tensorflow.Core.Framework.Function
                              SignatureDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              signatureDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              SignatureDef'InputsEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              SignatureDef'OutputsEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              simpleValueProto.Tensorflow.Core.Framework.Summary
                              size 
                              1 (Function)Proto.Tensorflow.Core.Framework.TensorShape
                              2 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              sliceProto.Tensorflow.Core.Util.SavedTensorSlice
                              slicesProto.Tensorflow.Core.Protobuf.TensorBundle
                              slotProto.Tensorflow.Core.Framework.StepStats
                              snapshotProto.Tensorflow.Core.Util.TestLog
                              snapshotNameProto.Tensorflow.Core.Framework.Variable
                              startProto.Tensorflow.Core.Framework.TensorSlice
                              startTimeProto.Tensorflow.Core.Util.TestLog
                              statusProto.Tensorflow.Core.Util.Event
                              stepProto.Tensorflow.Core.Util.Event
                              stepIdProto.Tensorflow.Core.Framework.LogMemory
                              StepStats 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              stepStatsProto.Tensorflow.Core.Protobuf.Config
                              stringValProto.Tensorflow.Core.Framework.Tensor
                              stringValueProto.Tensorflow.Core.Util.TestLog
                              strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
                              sumProto.Tensorflow.Core.Framework.Summary
                              Summary 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              summary 
                              1 (Function)Proto.Tensorflow.Core.Util.Event
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              Summary'Audio 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Image 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Value 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Value'AudioProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'HistoProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'ImageProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'ObsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'SimpleValueProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'TensorProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'ValueProto.Tensorflow.Core.Framework.Summary
                              SummaryDescription 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              summaryDescriptionProto.Tensorflow.Core.Framework.Summary
                              SummaryMetadata 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              SummaryMetadata'PluginData 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              sumSquaresProto.Tensorflow.Core.Framework.Summary
                              swapMemoryProto.Tensorflow.Core.Protobuf.ControlFlow
                              systemProto.Tensorflow.Core.Util.TestLog
                              tag 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Util.Event
                              TaggedRunMetadata 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              tagsProto.Tensorflow.Core.Protobuf.MetaGraph
                              targetProto.Tensorflow.Core.Util.TestLog
                              taskIndexProto.Tensorflow.Core.Protobuf.TensorflowServer
                              tasksProto.Tensorflow.Core.Protobuf.Cluster
                              temporaryMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              tensor 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              3 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              5 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              tensorContentProto.Tensorflow.Core.Framework.Tensor
                              TensorDescription 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorDescription
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorDescription
                              tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
                              tensorflowGitVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              tensorflowVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo'CooSparse 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo'CooSparse'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo'EncodingProto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo'NameProto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
                              tensorShape 
                              1 (Function)Proto.Tensorflow.Core.Framework.Tensor
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorShapeProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
                              TensorShapeProto'Dim 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
                              TensorSliceProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorSlice
                              TensorSliceProto'Extent 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorSlice
                              TensorSliceProto'Extent'HasLengthProto.Tensorflow.Core.Framework.TensorSlice
                              TensorSliceProto'Extent'LengthProto.Tensorflow.Core.Framework.TensorSlice
                              TestResults 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              TestResults'ANDROID_BENCHMARKProto.Tensorflow.Core.Util.TestLog
                              TestResults'BenchmarkTypeProto.Tensorflow.Core.Util.TestLog
                              TestResults'CPP_MICROBENCHMARKProto.Tensorflow.Core.Util.TestLog
                              TestResults'PYTHON_BENCHMARKProto.Tensorflow.Core.Util.TestLog
                              TestResults'UNKNOWNProto.Tensorflow.Core.Util.TestLog
                              threadIdProto.Tensorflow.Core.Framework.StepStats
                              ThreadPoolOptionProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              throughputProto.Tensorflow.Core.Util.TestLog
                              timelineLabelProto.Tensorflow.Core.Framework.StepStats
                              timelineStepProto.Tensorflow.Core.Protobuf.Config
                              timeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              tolerateDebugOpCreationFailuresProto.Tensorflow.Core.Protobuf.Debug
                              totalProto.Tensorflow.Core.Util.TestLog
                              totalBytesProto.Tensorflow.Core.Framework.StepStats
                              traceLevelProto.Tensorflow.Core.Protobuf.Config
                              type' 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              4 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              typeAttrProto.Tensorflow.Core.Framework.OpDef
                              typeHintProto.Tensorflow.Core.Framework.Summary
                              typeListAttrProto.Tensorflow.Core.Framework.OpDef
                              UNAUTHENTICATEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              UNAVAILABLEProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              UNIMPLEMENTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              UNKNOWNProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              unknownRankProto.Tensorflow.Core.Framework.TensorShape
                              usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
                              useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
                              uuidProto.Tensorflow.Core.Util.TestLog
                              value 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              3 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
                              4 (Function)Proto.Tensorflow.Core.Framework.Summary
                              5 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              6 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              7 (Function)Proto.Tensorflow.Core.Framework.Function
                              8 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              9 (Function)Proto.Tensorflow.Core.Protobuf.Config
                              10 (Function)Proto.Tensorflow.Core.Example.Feature
                              11 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              valuesProto.Tensorflow.Core.Protobuf.ControlFlow
                              ValuesDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
                              ValuesDef'ExternalValuesEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              valuesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              VariableDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Variable
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Variable
                              variableNameProto.Tensorflow.Core.Framework.Variable
                              varLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              VarLenFeatureProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              varOffsetProto.Tensorflow.Core.Framework.Variable
                              varShapeProto.Tensorflow.Core.Framework.Variable
                              version 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Protobuf.Saver
                              3 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              4 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              5 (Function)Proto.Tensorflow.Core.Framework.Graph
                              VersionDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Versions
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Versions
                              versionNumberProto.Tensorflow.Core.Framework.Tensor
                              versions 
                              1 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Function)Proto.Tensorflow.Core.Framework.Graph
                              visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
                              wallTime 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Util.Event
                              WhileContextDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              widthProto.Tensorflow.Core.Framework.Summary
                              _AllocationDescription'allocatedBytesProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'allocationIdProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'allocatorNameProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'hasSingleReferenceProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'ptrProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocationDescription'requestedBytesProto.Tensorflow.Core.Framework.AllocationDescription
                              _AllocatorMemoryUsed'allocatorBytesInUseProto.Tensorflow.Core.Framework.StepStats
                              _AllocatorMemoryUsed'allocatorNameProto.Tensorflow.Core.Framework.StepStats
                              _AllocatorMemoryUsed'liveBytesProto.Tensorflow.Core.Framework.StepStats
                              _AllocatorMemoryUsed'peakBytesProto.Tensorflow.Core.Framework.StepStats
                              _AllocatorMemoryUsed'totalBytesProto.Tensorflow.Core.Framework.StepStats
                              _AssetFileDef'filenameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _AssetFileDef'tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              _AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
                              _AttrValue'valueProto.Tensorflow.Core.Framework.AttrValue
                              _AutoParallelOptions'enableProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _AutoParallelOptions'numReplicasProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _AvailableDeviceInfo'memoryLimitProto.Tensorflow.Core.Util.TestLog
                              _AvailableDeviceInfo'nameProto.Tensorflow.Core.Util.TestLog
                              _AvailableDeviceInfo'physicalDescriptionProto.Tensorflow.Core.Util.TestLog
                              _AvailableDeviceInfo'type'Proto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntries'entryProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'cpuTimeProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'extrasProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'ExtrasEntry'keyProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'ExtrasEntry'valueProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'itersProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'nameProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'throughputProto.Tensorflow.Core.Util.TestLog
                              _BenchmarkEntry'wallTimeProto.Tensorflow.Core.Util.TestLog
                              _BuildConfiguration'ccFlagsProto.Tensorflow.Core.Util.TestLog
                              _BuildConfiguration'modeProto.Tensorflow.Core.Util.TestLog
                              _BuildConfiguration'optsProto.Tensorflow.Core.Util.TestLog
                              _BundleEntryProto'crc32cProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'dtypeProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'offsetProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'shapeProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'shardIdProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'sizeProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleEntryProto'slicesProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleHeaderProto'endiannessProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleHeaderProto'numShardsProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BundleHeaderProto'versionProto.Tensorflow.Core.Protobuf.TensorBundle
                              _BytesList'valueProto.Tensorflow.Core.Example.Feature
                              _ClusterDef'jobProto.Tensorflow.Core.Protobuf.Cluster
                              _CollectionDef'AnyList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'BytesList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'FloatList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'Int64List'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'kindProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CollectionDef'NodeList'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _CommitId'kindProto.Tensorflow.Core.Util.TestLog
                              _CommitId'snapshotProto.Tensorflow.Core.Util.TestLog
                              _CondContextDef'branchProto.Tensorflow.Core.Protobuf.ControlFlow
                              _CondContextDef'contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _CondContextDef'pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _CondContextDef'predNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _CondContextDef'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
                              _ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'clusterDefProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              _ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
                              _CostGraphDef'nodeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'computeCostProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'computeTimeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'controlInputProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'deviceProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'devicePersistentMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'deviceTempMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'hostPersistentMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'hostTempMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'idProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'inputInfoProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'InputInfo'precedingNodeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'InputInfo'precedingPortProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'isFinalProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'memoryTimeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'nameProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'outputInfoProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'OutputInfo'aliasInputPortProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'OutputInfo'dtypeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'OutputInfo'shapeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'OutputInfo'sizeProto.Tensorflow.Core.Framework.CostGraph
                              _CostGraphDef'Node'temporaryMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              _CPUInfo'cacheSizeProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'CacheSizeEntry'keyProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'CacheSizeEntry'valueProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'cpuGovernorProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'cpuInfoProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'mhzPerCpuProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'numCoresProto.Tensorflow.Core.Util.TestLog
                              _CPUInfo'numCoresAllowedProto.Tensorflow.Core.Util.TestLog
                              _DebugOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Debug
                              _DebugOptions'globalStepProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Debug
                              _DebugTensorWatch'tolerateDebugOpCreationFailuresProto.Tensorflow.Core.Protobuf.Debug
                              _DeviceAttributes'deviceTypeProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'incarnationProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'localityProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'memoryLimitProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'nameProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceAttributes'physicalDeviceDescProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceLocality'busIdProto.Tensorflow.Core.Framework.DeviceAttributes
                              _DeviceStepStats'deviceProto.Tensorflow.Core.Framework.StepStats
                              _DeviceStepStats'nodeStatsProto.Tensorflow.Core.Framework.StepStats
                              _EntryValue'kindProto.Tensorflow.Core.Util.TestLog
                              _Event'stepProto.Tensorflow.Core.Util.Event
                              _Event'wallTimeProto.Tensorflow.Core.Util.Event
                              _Event'whatProto.Tensorflow.Core.Util.Event
                              _Example'featuresProto.Tensorflow.Core.Example.Example
                              _ExampleParserConfiguration'featureMapProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _ExampleParserConfiguration'FeatureMapEntry'keyProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _ExampleParserConfiguration'FeatureMapEntry'valueProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _Feature'kindProto.Tensorflow.Core.Example.Feature
                              _FeatureConfiguration'configProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FeatureList'featureProto.Tensorflow.Core.Example.Feature
                              _FeatureLists'featureListProto.Tensorflow.Core.Example.Feature
                              _FeatureLists'FeatureListEntry'keyProto.Tensorflow.Core.Example.Feature
                              _FeatureLists'FeatureListEntry'valueProto.Tensorflow.Core.Example.Feature
                              _Features'featureProto.Tensorflow.Core.Example.Feature
                              _Features'FeatureEntry'keyProto.Tensorflow.Core.Example.Feature
                              _Features'FeatureEntry'valueProto.Tensorflow.Core.Example.Feature
                              _FixedLenFeatureProto'defaultValueProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FixedLenFeatureProto'dtypeProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FixedLenFeatureProto'shapeProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FixedLenFeatureProto'valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _FloatList'valueProto.Tensorflow.Core.Example.Feature
                              _FunctionDef'attrProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'AttrEntry'keyProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'AttrEntry'valueProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'nodeDefProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'retProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'RetEntry'keyProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'RetEntry'valueProto.Tensorflow.Core.Framework.Function
                              _FunctionDef'signatureProto.Tensorflow.Core.Framework.Function
                              _FunctionDefLibrary'functionProto.Tensorflow.Core.Framework.Function
                              _FunctionDefLibrary'gradientProto.Tensorflow.Core.Framework.Function
                              _GPUInfo'busIdProto.Tensorflow.Core.Util.TestLog
                              _GPUInfo'modelProto.Tensorflow.Core.Util.TestLog
                              _GPUInfo'uuidProto.Tensorflow.Core.Util.TestLog
                              _GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'forceGpuCompatibleProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'pollingActiveDelayUsecsProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'pollingInactiveDelayMsecsProto.Tensorflow.Core.Protobuf.Config
                              _GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
                              _GradientDef'functionNameProto.Tensorflow.Core.Framework.Function
                              _GradientDef'gradientFuncProto.Tensorflow.Core.Framework.Function
                              _GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'versionProto.Tensorflow.Core.Framework.Graph
                              _GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
                              _GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
                              _GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
                              _HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'minProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'numProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
                              _HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
                              _Int64List'valueProto.Tensorflow.Core.Example.Feature
                              _JobDef'nameProto.Tensorflow.Core.Protobuf.Cluster
                              _JobDef'tasksProto.Tensorflow.Core.Protobuf.Cluster
                              _JobDef'TasksEntry'keyProto.Tensorflow.Core.Protobuf.Cluster
                              _JobDef'TasksEntry'valueProto.Tensorflow.Core.Protobuf.Cluster
                              _KernelDef'AttrConstraint'allowedValuesProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'AttrConstraint'nameProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'constraintProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'deviceTypeProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'hostMemoryArgProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'labelProto.Tensorflow.Core.Framework.KernelDef
                              _KernelDef'opProto.Tensorflow.Core.Framework.KernelDef
                              _LogMessage'levelProto.Tensorflow.Core.Util.Event
                              _LogMessage'messageProto.Tensorflow.Core.Util.Event
                              _MachineConfiguration'availableDeviceInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'cpuInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'deviceInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'hostnameProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'memoryInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'platformInfoProto.Tensorflow.Core.Util.TestLog
                              _MachineConfiguration'serialIdentifierProto.Tensorflow.Core.Util.TestLog
                              _MemmappedFileSystemDirectory'elementProto.Tensorflow.Core.Util.MemmappedFileSystem
                              _MemmappedFileSystemDirectoryElement'nameProto.Tensorflow.Core.Util.MemmappedFileSystem
                              _MemmappedFileSystemDirectoryElement'offsetProto.Tensorflow.Core.Util.MemmappedFileSystem
                              _MemoryInfo'availableProto.Tensorflow.Core.Util.TestLog
                              _MemoryInfo'totalProto.Tensorflow.Core.Util.TestLog
                              _MemoryLogRawAllocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'numBytesProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'operationProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'ptrProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawAllocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'deferredProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'operationProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogRawDeallocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogStep'handleProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogStep'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorAllocation'kernelNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorAllocation'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorAllocation'tensorProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorDeallocation'allocationIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorDeallocation'allocatorNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorOutput'indexProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorOutput'kernelNameProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorOutput'stepIdProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryLogTensorOutput'tensorProto.Tensorflow.Core.Framework.LogMemory
                              _MemoryStats'devicePersistentMemorySizeProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'devicePersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'deviceTempMemorySizeProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'hostPersistentMemorySizeProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'hostPersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
                              _MemoryStats'hostTempMemorySizeProto.Tensorflow.Core.Framework.StepStats
                              _MetaGraphDef'assetFileDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'collectionDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'CollectionDefEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'CollectionDefEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'graphDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'metaGraphVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'tagsProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'tensorflowGitVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'MetaInfoDef'tensorflowVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'signatureDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'SignatureDefEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
                              _MetaGraphDef'SignatureDefEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
                              _NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
                              _NamedTensorProto'nameProto.Tensorflow.Core.Protobuf.NamedTensor
                              _NamedTensorProto'tensorProto.Tensorflow.Core.Protobuf.NamedTensor
                              _NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
                              _NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
                              _NodeExecStats'allEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'allStartMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'memoryProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'memoryStatsProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'nodeNameProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'opEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'opStartRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'outputProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'referencedTensorProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'scheduledMicrosProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'threadIdProto.Tensorflow.Core.Framework.StepStats
                              _NodeExecStats'timelineLabelProto.Tensorflow.Core.Framework.StepStats
                              _NodeOutput'slotProto.Tensorflow.Core.Framework.StepStats
                              _NodeOutput'tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
                              _OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'attrProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
                              _OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'nameProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
                              _OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
                              _OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
                              _OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
                              _OpList'opProto.Tensorflow.Core.Framework.OpDef
                              _OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              _OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
                              _PlatformInfo'bitsProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'linkageProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'machineProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'releaseProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'systemProto.Tensorflow.Core.Util.TestLog
                              _PlatformInfo'versionProto.Tensorflow.Core.Util.TestLog
                              _QueueRunnerDef'cancelOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              _QueueRunnerDef'closeOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              _QueueRunnerDef'enqueueOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              _QueueRunnerDef'queueClosedExceptionTypesProto.Tensorflow.Core.Protobuf.QueueRunner
                              _QueueRunnerDef'queueNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              _ResourceHandleProto'containerProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandleProto'deviceProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandleProto'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandleProto'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
                              _ResourceHandleProto'nameProto.Tensorflow.Core.Framework.ResourceHandle
                              _RewriterConfig'autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'constantFoldingProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'disableModelPruningProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'memoryOptimizationProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'optimizersProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RewriterConfig'optimizeTensorLayoutProto.Tensorflow.Core.Protobuf.RewriterConfig
                              _RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
                              _RunConfiguration'argumentProto.Tensorflow.Core.Util.TestLog
                              _RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              _RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              _RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
                              _SavedModel'metaGraphsProto.Tensorflow.Core.Protobuf.SavedModel
                              _SavedModel'savedModelSchemaVersionProto.Tensorflow.Core.Protobuf.SavedModel
                              _SavedSlice'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSlice'nameProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSlice'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSliceMeta'nameProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSliceMeta'shapeProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSliceMeta'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedSliceMeta'type'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedTensorSliceMeta'tensorProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedTensorSliceMeta'versionsProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedTensorSlices'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              _SavedTensorSlices'metaProto.Tensorflow.Core.Util.SavedTensorSlice
                              _SaverDef'filenameTensorNameProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'keepCheckpointEveryNHoursProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'maxToKeepProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'restoreOpNameProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'saveTensorNameProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'shardedProto.Tensorflow.Core.Protobuf.Saver
                              _SaverDef'versionProto.Tensorflow.Core.Protobuf.Saver
                              _SaveSliceInfoDef'fullNameProto.Tensorflow.Core.Framework.Variable
                              _SaveSliceInfoDef'fullShapeProto.Tensorflow.Core.Framework.Variable
                              _SaveSliceInfoDef'varOffsetProto.Tensorflow.Core.Framework.Variable
                              _SaveSliceInfoDef'varShapeProto.Tensorflow.Core.Framework.Variable
                              _SequenceExample'contextProto.Tensorflow.Core.Example.Example
                              _SequenceExample'featureListsProto.Tensorflow.Core.Example.Example
                              _ServerDef'clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _ServerDef'defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _ServerDef'jobNameProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _ServerDef'protocolProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _ServerDef'taskIndexProto.Tensorflow.Core.Protobuf.TensorflowServer
                              _SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
                              _SessionLog'msgProto.Tensorflow.Core.Util.Event
                              _SessionLog'statusProto.Tensorflow.Core.Util.Event
                              _SignatureDef'inputsProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'InputsEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'InputsEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'methodNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'outputsProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'OutputsEntry'keyProto.Tensorflow.Core.Protobuf.MetaGraph
                              _SignatureDef'OutputsEntry'valueProto.Tensorflow.Core.Protobuf.MetaGraph
                              _StepStats'devStatsProto.Tensorflow.Core.Framework.StepStats
                              _Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
                              _Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
                              _Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
                              _Summary'valueProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'metadataProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
                              _Summary'Value'valueProto.Tensorflow.Core.Framework.Summary
                              _SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'displayNameProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'pluginDataProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'PluginData'contentProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'PluginData'pluginNameProto.Tensorflow.Core.Framework.Summary
                              _SummaryMetadata'summaryDescriptionProto.Tensorflow.Core.Framework.Summary
                              _TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
                              _TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
                              _TensorDescription'allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
                              _TensorDescription'dtypeProto.Tensorflow.Core.Framework.TensorDescription
                              _TensorDescription'shapeProto.Tensorflow.Core.Framework.TensorDescription
                              _TensorInfo'CooSparse'denseShapeTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'CooSparse'indicesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'CooSparse'valuesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'dtypeProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'encodingProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorInfo'tensorShapeProto.Tensorflow.Core.Protobuf.MetaGraph
                              _TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
                              _TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
                              _TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
                              _TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
                              _TensorSliceProto'extentProto.Tensorflow.Core.Framework.TensorSlice
                              _TensorSliceProto'Extent'hasLengthProto.Tensorflow.Core.Framework.TensorSlice
                              _TensorSliceProto'Extent'startProto.Tensorflow.Core.Framework.TensorSlice
                              _TestResults'benchmarkTypeProto.Tensorflow.Core.Util.TestLog
                              _TestResults'buildConfigurationProto.Tensorflow.Core.Util.TestLog
                              _TestResults'commitIdProto.Tensorflow.Core.Util.TestLog
                              _TestResults'entriesProto.Tensorflow.Core.Util.TestLog
                              _TestResults'machineConfigurationProto.Tensorflow.Core.Util.TestLog
                              _TestResults'nameProto.Tensorflow.Core.Util.TestLog
                              _TestResults'runConfigurationProto.Tensorflow.Core.Util.TestLog
                              _TestResults'runModeProto.Tensorflow.Core.Util.TestLog
                              _TestResults'runTimeProto.Tensorflow.Core.Util.TestLog
                              _TestResults'startTimeProto.Tensorflow.Core.Util.TestLog
                              _TestResults'targetProto.Tensorflow.Core.Util.TestLog
                              _ThreadPoolOptionProto'globalNameProto.Tensorflow.Core.Protobuf.Config
                              _ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
                              _ValuesDef'externalValuesProto.Tensorflow.Core.Protobuf.ControlFlow
                              _ValuesDef'ExternalValuesEntry'keyProto.Tensorflow.Core.Protobuf.ControlFlow
                              _ValuesDef'ExternalValuesEntry'valueProto.Tensorflow.Core.Protobuf.ControlFlow
                              _ValuesDef'valuesProto.Tensorflow.Core.Protobuf.ControlFlow
                              _VariableDef'initializerNameProto.Tensorflow.Core.Framework.Variable
                              _VariableDef'isResourceProto.Tensorflow.Core.Framework.Variable
                              _VariableDef'saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
                              _VariableDef'snapshotNameProto.Tensorflow.Core.Framework.Variable
                              _VariableDef'variableNameProto.Tensorflow.Core.Framework.Variable
                              _VarLenFeatureProto'dtypeProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _VarLenFeatureProto'indicesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _VarLenFeatureProto'shapesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _VarLenFeatureProto'valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              _VersionDef'badConsumersProto.Tensorflow.Core.Framework.Versions
                              _VersionDef'minConsumerProto.Tensorflow.Core.Framework.Versions
                              _VersionDef'producerProto.Tensorflow.Core.Framework.Versions
                              _WhileContextDef'backPropProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'loopEnterNamesProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'loopExitNamesProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'parallelIterationsProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'pivotForBodyNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'pivotForPredNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'swapMemoryProto.Tensorflow.Core.Protobuf.ControlFlow
                              _WhileContextDef'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html index d24203f..8c4bac3 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - B)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - B

                              bProto.Tensorflow.Core.Framework.AttrValue
                              backPropProto.Tensorflow.Core.Protobuf.ControlFlow
                              badConsumersProto.Tensorflow.Core.Framework.Versions
                              BenchmarkEntries 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              BenchmarkEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              BenchmarkEntry'ExtrasEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              benchmarkTypeProto.Tensorflow.Core.Util.TestLog
                              bitsProto.Tensorflow.Core.Util.TestLog
                              boolValProto.Tensorflow.Core.Framework.Tensor
                              branchProto.Tensorflow.Core.Protobuf.ControlFlow
                              bucketProto.Tensorflow.Core.Framework.Summary
                              bucketLimitProto.Tensorflow.Core.Framework.Summary
                              BuildConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              buildConfigurationProto.Tensorflow.Core.Util.TestLog
                              buildCostModelProto.Tensorflow.Core.Protobuf.Config
                              buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
                              BundleEntryProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              BundleHeaderProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              BundleHeaderProto'BIGProto.Tensorflow.Core.Protobuf.TensorBundle
                              BundleHeaderProto'EndiannessProto.Tensorflow.Core.Protobuf.TensorBundle
                              BundleHeaderProto'LITTLEProto.Tensorflow.Core.Protobuf.TensorBundle
                              busId 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              BytesList 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              bytesList 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html index fda7550..9852abb 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - C)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - C

                              cacheSizeProto.Tensorflow.Core.Util.TestLog
                              CANCELLEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              cancelOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              ccFlagsProto.Tensorflow.Core.Util.TestLog
                              changelistProto.Tensorflow.Core.Util.TestLog
                              checkpointPathProto.Tensorflow.Core.Util.Event
                              closeOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
                              ClusterDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Cluster
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Cluster
                              clusterDefProto.Tensorflow.Core.Protobuf.Config
                              CodeProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              CollectionDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              collectionDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'AnyList 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'AnyList'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'BytesList 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'BytesList'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'FloatList 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'FloatList'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'Int64List 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'Int64List'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'KindProto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'NodeList 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              CollectionDef'NodeList'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              colorspaceProto.Tensorflow.Core.Framework.Summary
                              CommitId 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              commitIdProto.Tensorflow.Core.Util.TestLog
                              CommitId'ChangelistProto.Tensorflow.Core.Util.TestLog
                              CommitId'HashProto.Tensorflow.Core.Util.TestLog
                              CommitId'KindProto.Tensorflow.Core.Util.TestLog
                              computeCostProto.Tensorflow.Core.Framework.CostGraph
                              computeTimeProto.Tensorflow.Core.Framework.CostGraph
                              CondContextDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              ConfigProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              ConfigProto'DeviceCountEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              constantFoldingProto.Tensorflow.Core.Protobuf.RewriterConfig
                              constraintProto.Tensorflow.Core.Framework.KernelDef
                              containerProto.Tensorflow.Core.Framework.ResourceHandle
                              contentProto.Tensorflow.Core.Framework.Summary
                              contentTypeProto.Tensorflow.Core.Framework.Summary
                              contextProto.Tensorflow.Core.Example.Example
                              contextNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              controlInputProto.Tensorflow.Core.Framework.CostGraph
                              cooSparseProto.Tensorflow.Core.Protobuf.MetaGraph
                              costGraphProto.Tensorflow.Core.Protobuf.Config
                              CostGraphDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
                              CostGraphDef'Node 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
                              CostGraphDef'Node'InputInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
                              CostGraphDef'Node'OutputInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.CostGraph
                              cpuGovernorProto.Tensorflow.Core.Util.TestLog
                              CPUInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              cpuInfoProto.Tensorflow.Core.Util.TestLog
                              CPUInfo'CacheSizeEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              cpuTimeProto.Tensorflow.Core.Util.TestLog
                              crc32cProto.Tensorflow.Core.Protobuf.TensorBundle
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html index ab1367b..eaa147d 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - D)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - D

                              DataTypeProto.Tensorflow.Core.Framework.Types
                              dcomplexValProto.Tensorflow.Core.Framework.Tensor
                              debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              defaultValueProto.Tensorflow.Core.Framework.OpDef
                              deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
                              deprecationProto.Tensorflow.Core.Framework.OpDef
                              descriptionProto.Tensorflow.Core.Framework.OpDef
                              device 
                              1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              deviceCountProto.Tensorflow.Core.Protobuf.Config
                              deviceFiltersProto.Tensorflow.Core.Protobuf.Config
                              dimProto.Tensorflow.Core.Framework.TensorShape
                              doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
                              doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
                              doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
                              doubleValProto.Tensorflow.Core.Framework.Tensor
                              dtypeProto.Tensorflow.Core.Framework.Tensor
                              DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
                              DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_BOOLProto.Tensorflow.Core.Framework.Types
                              DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
                              DT_DOUBLEProto.Tensorflow.Core.Framework.Types
                              DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
                              DT_FLOATProto.Tensorflow.Core.Framework.Types
                              DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
                              DT_HALFProto.Tensorflow.Core.Framework.Types
                              DT_HALF_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT16Proto.Tensorflow.Core.Framework.Types
                              DT_INT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT32Proto.Tensorflow.Core.Framework.Types
                              DT_INT32_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT64Proto.Tensorflow.Core.Framework.Types
                              DT_INT64_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT8Proto.Tensorflow.Core.Framework.Types
                              DT_INT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_INVALIDProto.Tensorflow.Core.Framework.Types
                              DT_QINT16Proto.Tensorflow.Core.Framework.Types
                              DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_QINT32Proto.Tensorflow.Core.Framework.Types
                              DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
                              DT_QINT8Proto.Tensorflow.Core.Framework.Types
                              DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_QUINT16Proto.Tensorflow.Core.Framework.Types
                              DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_QUINT8Proto.Tensorflow.Core.Framework.Types
                              DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_RESOURCEProto.Tensorflow.Core.Framework.Types
                              DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
                              DT_STRINGProto.Tensorflow.Core.Framework.Types
                              DT_STRING_REFProto.Tensorflow.Core.Framework.Types
                              DT_UINT16Proto.Tensorflow.Core.Framework.Types
                              DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_UINT8Proto.Tensorflow.Core.Framework.Types
                              DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - D

                              data'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              DataTypeProto.Tensorflow.Core.Framework.Types
                              DATA_LOSSProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              dcomplexValProto.Tensorflow.Core.Framework.Tensor
                              DEADLINE_EXCEEDEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              debugOpsProto.Tensorflow.Core.Protobuf.Debug
                              DebugOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Debug
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Debug
                              debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              DebugTensorWatch 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Debug
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Debug
                              debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Debug
                              debugUrlsProto.Tensorflow.Core.Protobuf.Debug
                              defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
                              defaultValue 
                              1 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              deferredProto.Tensorflow.Core.Framework.LogMemory
                              deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
                              denseShapeTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              deprecationProto.Tensorflow.Core.Framework.OpDef
                              descriptionProto.Tensorflow.Core.Framework.OpDef
                              device 
                              1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
                              2 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              4 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              DeviceAttributes 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              deviceCountProto.Tensorflow.Core.Protobuf.Config
                              deviceFiltersProto.Tensorflow.Core.Protobuf.Config
                              deviceInfoProto.Tensorflow.Core.Util.TestLog
                              DeviceLocality 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              devicePersistentMemorySize 
                              1 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              devicePersistentTensorAllocIdsProto.Tensorflow.Core.Framework.StepStats
                              DeviceStepStats 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              deviceTempMemorySize 
                              1 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              deviceType 
                              1 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              2 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              devStatsProto.Tensorflow.Core.Framework.StepStats
                              dimProto.Tensorflow.Core.Framework.TensorShape
                              disableModelPruningProto.Tensorflow.Core.Protobuf.RewriterConfig
                              displayNameProto.Tensorflow.Core.Framework.Summary
                              doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
                              doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
                              doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
                              doubleValProto.Tensorflow.Core.Framework.Tensor
                              doubleValueProto.Tensorflow.Core.Util.TestLog
                              DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_Proto.Tensorflow.Core.Lib.Core.ErrorCodes
                              dtype 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Function)Proto.Tensorflow.Core.Framework.Tensor
                              3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              4 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              5 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
                              6 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
                              DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_BOOLProto.Tensorflow.Core.Framework.Types
                              DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
                              DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
                              DT_DOUBLEProto.Tensorflow.Core.Framework.Types
                              DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
                              DT_FLOATProto.Tensorflow.Core.Framework.Types
                              DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
                              DT_HALFProto.Tensorflow.Core.Framework.Types
                              DT_HALF_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT16Proto.Tensorflow.Core.Framework.Types
                              DT_INT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT32Proto.Tensorflow.Core.Framework.Types
                              DT_INT32_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT64Proto.Tensorflow.Core.Framework.Types
                              DT_INT64_REFProto.Tensorflow.Core.Framework.Types
                              DT_INT8Proto.Tensorflow.Core.Framework.Types
                              DT_INT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_INVALIDProto.Tensorflow.Core.Framework.Types
                              DT_QINT16Proto.Tensorflow.Core.Framework.Types
                              DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_QINT32Proto.Tensorflow.Core.Framework.Types
                              DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
                              DT_QINT8Proto.Tensorflow.Core.Framework.Types
                              DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_QUINT16Proto.Tensorflow.Core.Framework.Types
                              DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_QUINT8Proto.Tensorflow.Core.Framework.Types
                              DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
                              DT_RESOURCEProto.Tensorflow.Core.Framework.Types
                              DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
                              DT_STRINGProto.Tensorflow.Core.Framework.Types
                              DT_STRING_REFProto.Tensorflow.Core.Framework.Types
                              DT_UINT16Proto.Tensorflow.Core.Framework.Types
                              DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
                              DT_UINT8Proto.Tensorflow.Core.Framework.Types
                              DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html index 09068b6..5124ecb 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - E)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - E

                              elementProto.Tensorflow.Core.Util.MemmappedFileSystem
                              enableProto.Tensorflow.Core.Protobuf.RewriterConfig
                              enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
                              enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
                              encodedAudioStringProto.Tensorflow.Core.Framework.Summary
                              encodedImageStringProto.Tensorflow.Core.Framework.Summary
                              endiannessProto.Tensorflow.Core.Protobuf.TensorBundle
                              enqueueOpNameProto.Tensorflow.Core.Protobuf.QueueRunner
                              entriesProto.Tensorflow.Core.Util.TestLog
                              entryProto.Tensorflow.Core.Util.TestLog
                              EntryValue 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              EntryValue'DoubleValueProto.Tensorflow.Core.Util.TestLog
                              EntryValue'KindProto.Tensorflow.Core.Util.TestLog
                              EntryValue'StringValueProto.Tensorflow.Core.Util.TestLog
                              Event 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              Event'FileVersionProto.Tensorflow.Core.Util.Event
                              Event'GraphDefProto.Tensorflow.Core.Util.Event
                              Event'LogMessageProto.Tensorflow.Core.Util.Event
                              Event'MetaGraphDefProto.Tensorflow.Core.Util.Event
                              Event'SessionLogProto.Tensorflow.Core.Util.Event
                              Event'SummaryProto.Tensorflow.Core.Util.Event
                              Event'TaggedRunMetadataProto.Tensorflow.Core.Util.Event
                              Event'WhatProto.Tensorflow.Core.Util.Event
                              Example 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Example
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Example
                              ExampleParserConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              ExampleParserConfiguration'FeatureMapEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              explanationProto.Tensorflow.Core.Framework.OpDef
                              extentProto.Tensorflow.Core.Framework.TensorSlice
                              externalValuesProto.Tensorflow.Core.Protobuf.ControlFlow
                              extrasProto.Tensorflow.Core.Util.TestLog
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html index 6c182e3..a095f6f 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - F)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - F

                              fProto.Tensorflow.Core.Framework.AttrValue
                              FAILED_PRECONDITIONProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              Feature 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featureProto.Tensorflow.Core.Example.Feature
                              Feature'BytesListProto.Tensorflow.Core.Example.Feature
                              Feature'FloatListProto.Tensorflow.Core.Example.Feature
                              Feature'Int64ListProto.Tensorflow.Core.Example.Feature
                              Feature'KindProto.Tensorflow.Core.Example.Feature
                              FeatureConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FeatureConfiguration'ConfigProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FeatureConfiguration'FixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FeatureConfiguration'VarLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FeatureList 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featureListProto.Tensorflow.Core.Example.Feature
                              FeatureLists 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featureListsProto.Tensorflow.Core.Example.Example
                              FeatureLists'FeatureListEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featureMapProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              Features 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              featuresProto.Tensorflow.Core.Example.Example
                              Features'FeatureEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              filenameProto.Tensorflow.Core.Protobuf.MetaGraph
                              filenameTensorNameProto.Tensorflow.Core.Protobuf.Saver
                              fileVersionProto.Tensorflow.Core.Util.Event
                              fixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FixedLenFeatureProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              FloatList 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              floatList 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              floatValProto.Tensorflow.Core.Framework.Tensor
                              forceGpuCompatibleProto.Tensorflow.Core.Protobuf.Config
                              fullNameProto.Tensorflow.Core.Framework.Variable
                              fullShapeProto.Tensorflow.Core.Framework.Variable
                              funcProto.Tensorflow.Core.Framework.AttrValue
                              functionProto.Tensorflow.Core.Framework.Function
                              FunctionDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              FunctionDef'AttrEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              FunctionDef'RetEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              FunctionDefLibrary 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Function
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Function
                              functionNameProto.Tensorflow.Core.Framework.Function
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html index 33f0067..0c33d32 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - G)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html index 44e25df..343e5a2 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - H)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html index f918573..d8b8976 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - I)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - I

                              iProto.Tensorflow.Core.Framework.AttrValue
                              idProto.Tensorflow.Core.Framework.CostGraph
                              imageProto.Tensorflow.Core.Framework.Summary
                              incarnationProto.Tensorflow.Core.Framework.DeviceAttributes
                              indexProto.Tensorflow.Core.Framework.LogMemory
                              indicesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              indicesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              inferShapesProto.Tensorflow.Core.Protobuf.Config
                              initializerNameProto.Tensorflow.Core.Framework.Variable
                              inputProto.Tensorflow.Core.Framework.NodeDef
                              inputArgProto.Tensorflow.Core.Framework.OpDef
                              inputInfoProto.Tensorflow.Core.Framework.CostGraph
                              inputsProto.Tensorflow.Core.Protobuf.MetaGraph
                              Int64List 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Feature
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Feature
                              int64List 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              int64ValProto.Tensorflow.Core.Framework.Tensor
                              INTERNALProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
                              intValProto.Tensorflow.Core.Framework.Tensor
                              INVALID_ARGUMENTProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              isAggregateProto.Tensorflow.Core.Framework.OpDef
                              isCommutativeProto.Tensorflow.Core.Framework.OpDef
                              isFinalProto.Tensorflow.Core.Framework.CostGraph
                              isRefProto.Tensorflow.Core.Framework.OpDef
                              isResourceProto.Tensorflow.Core.Framework.Variable
                              isStatefulProto.Tensorflow.Core.Framework.OpDef
                              itersProto.Tensorflow.Core.Util.TestLog
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-J.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-J.html new file mode 100644 index 0000000..2cc367a --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-J.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - J)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html index 457c499..f180ed9 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - K)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html index 69c144f..f34a4b7 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - L)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html index 1ca17c2..40f71f5 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - M)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - M

                              maxProto.Tensorflow.Core.Framework.Summary
                              maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
                              maybe'audioProto.Tensorflow.Core.Framework.Summary
                              maybe'bProto.Tensorflow.Core.Framework.AttrValue
                              maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
                              maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
                              maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
                              maybe'fProto.Tensorflow.Core.Framework.AttrValue
                              maybe'fileVersionProto.Tensorflow.Core.Util.Event
                              maybe'funcProto.Tensorflow.Core.Framework.AttrValue
                              maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'graphDefProto.Tensorflow.Core.Util.Event
                              maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'histoProto.Tensorflow.Core.Framework.Summary
                              maybe'iProto.Tensorflow.Core.Framework.AttrValue
                              maybe'imageProto.Tensorflow.Core.Framework.Summary
                              maybe'libraryProto.Tensorflow.Core.Framework.Graph
                              maybe'listProto.Tensorflow.Core.Framework.AttrValue
                              maybe'logMessageProto.Tensorflow.Core.Util.Event
                              maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
                              maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
                              maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'sProto.Tensorflow.Core.Framework.AttrValue
                              maybe'sessionLogProto.Tensorflow.Core.Util.Event
                              maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
                              maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
                              maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
                              maybe'summaryProto.Tensorflow.Core.Util.Event
                              maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              maybe'tensor 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
                              maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
                              maybe'value 
                              1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              maybe'versionsProto.Tensorflow.Core.Framework.Graph
                              maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
                              messageProto.Tensorflow.Core.Util.Event
                              metaGraphDefProto.Tensorflow.Core.Util.Event
                              minProto.Tensorflow.Core.Framework.Summary
                              minimumProto.Tensorflow.Core.Framework.OpDef
                              msgProto.Tensorflow.Core.Util.Event
                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - M

                              machineProto.Tensorflow.Core.Util.TestLog
                              MachineConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              machineConfigurationProto.Tensorflow.Core.Util.TestLog
                              maxProto.Tensorflow.Core.Framework.Summary
                              maxToKeepProto.Tensorflow.Core.Protobuf.Saver
                              maybe'allocationDescriptionProto.Tensorflow.Core.Framework.TensorDescription
                              maybe'allowedValues 
                              1 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              maybe'anyInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'anyListProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'audioProto.Tensorflow.Core.Framework.Summary
                              maybe'autoParallelProto.Tensorflow.Core.Protobuf.RewriterConfig
                              maybe'bProto.Tensorflow.Core.Framework.AttrValue
                              maybe'buildConfigurationProto.Tensorflow.Core.Util.TestLog
                              maybe'bytesList 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              maybe'changelistProto.Tensorflow.Core.Util.TestLog
                              maybe'clusterProto.Tensorflow.Core.Protobuf.TensorflowServer
                              maybe'clusterDefProto.Tensorflow.Core.Protobuf.Config
                              maybe'commitIdProto.Tensorflow.Core.Util.TestLog
                              maybe'configProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'contextProto.Tensorflow.Core.Example.Example
                              maybe'cooSparseProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
                              maybe'cpuInfoProto.Tensorflow.Core.Util.TestLog
                              maybe'data'Proto.Tensorflow.Core.Util.SavedTensorSlice
                              maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'defaultSessionConfigProto.Tensorflow.Core.Protobuf.TensorflowServer
                              maybe'defaultValue 
                              1 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
                              maybe'doubleValueProto.Tensorflow.Core.Util.TestLog
                              maybe'encodingProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'entriesProto.Tensorflow.Core.Util.TestLog
                              maybe'fProto.Tensorflow.Core.Framework.AttrValue
                              maybe'featureListsProto.Tensorflow.Core.Example.Example
                              maybe'featuresProto.Tensorflow.Core.Example.Example
                              maybe'fileVersionProto.Tensorflow.Core.Util.Event
                              maybe'fixedLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'floatList 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              maybe'funcProto.Tensorflow.Core.Framework.AttrValue
                              maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'graphDef 
                              1 (Function)Proto.Tensorflow.Core.Util.Event
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'hashProto.Tensorflow.Core.Util.TestLog
                              maybe'hasLengthProto.Tensorflow.Core.Framework.TensorSlice
                              maybe'histoProto.Tensorflow.Core.Framework.Summary
                              maybe'iProto.Tensorflow.Core.Framework.AttrValue
                              maybe'imageProto.Tensorflow.Core.Framework.Summary
                              maybe'int64List 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Function)Proto.Tensorflow.Core.Example.Feature
                              maybe'kind 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              3 (Function)Proto.Tensorflow.Core.Example.Feature
                              maybe'lengthProto.Tensorflow.Core.Framework.TensorSlice
                              maybe'libraryProto.Tensorflow.Core.Framework.Graph
                              maybe'listProto.Tensorflow.Core.Framework.AttrValue
                              maybe'localityProto.Tensorflow.Core.Framework.DeviceAttributes
                              maybe'logMessageProto.Tensorflow.Core.Util.Event
                              maybe'machineConfigurationProto.Tensorflow.Core.Util.TestLog
                              maybe'memoryInfoProto.Tensorflow.Core.Util.TestLog
                              maybe'memoryStatsProto.Tensorflow.Core.Framework.StepStats
                              maybe'metaProto.Tensorflow.Core.Util.SavedTensorSlice
                              maybe'metadataProto.Tensorflow.Core.Framework.Summary
                              maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
                              maybe'metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'nameProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'nodeListProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
                              maybe'platformInfoProto.Tensorflow.Core.Util.TestLog
                              maybe'pluginDataProto.Tensorflow.Core.Framework.Summary
                              maybe'rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              maybe'runConfigurationProto.Tensorflow.Core.Util.TestLog
                              maybe'sProto.Tensorflow.Core.Framework.AttrValue
                              maybe'saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
                              maybe'sessionLogProto.Tensorflow.Core.Util.Event
                              maybe'shape 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              5 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
                              6 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'signatureProto.Tensorflow.Core.Framework.Function
                              maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
                              maybe'sliceProto.Tensorflow.Core.Util.SavedTensorSlice
                              maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
                              maybe'stringValueProto.Tensorflow.Core.Util.TestLog
                              maybe'strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'summaryProto.Tensorflow.Core.Util.Event
                              maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              maybe'tensor 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              4 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              maybe'tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
                              maybe'tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'tensorShape 
                              1 (Function)Proto.Tensorflow.Core.Framework.Tensor
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
                              maybe'value 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Framework.Summary
                              3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              4 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              5 (Function)Proto.Tensorflow.Core.Framework.Function
                              6 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              7 (Function)Proto.Tensorflow.Core.Example.Feature
                              8 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
                              maybe'varLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              maybe'versionProto.Tensorflow.Core.Protobuf.TensorBundle
                              maybe'versions 
                              1 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Function)Proto.Tensorflow.Core.Framework.Graph
                              maybe'whatProto.Tensorflow.Core.Util.Event
                              maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
                              MemmappedFileSystemDirectory 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              MemmappedFileSystemDirectoryElement 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              memoryProto.Tensorflow.Core.Framework.StepStats
                              MemoryInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              memoryInfoProto.Tensorflow.Core.Util.TestLog
                              memoryLimit 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              MemoryLogRawAllocation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogRawDeallocation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogStep 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogTensorAllocation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogTensorDeallocation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              MemoryLogTensorOutput 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.LogMemory
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.LogMemory
                              memoryOptimizationProto.Tensorflow.Core.Protobuf.RewriterConfig
                              MemoryStats 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              memoryStatsProto.Tensorflow.Core.Framework.StepStats
                              memoryTimeProto.Tensorflow.Core.Framework.CostGraph
                              messageProto.Tensorflow.Core.Util.Event
                              metaProto.Tensorflow.Core.Util.SavedTensorSlice
                              metadataProto.Tensorflow.Core.Framework.Summary
                              MetaGraphDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              metaGraphDefProto.Tensorflow.Core.Util.Event
                              MetaGraphDef'CollectionDefEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              MetaGraphDef'MetaInfoDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              MetaGraphDef'SignatureDefEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              metaGraphsProto.Tensorflow.Core.Protobuf.SavedModel
                              metaGraphVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              metaInfoDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              methodNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              mhzPerCpuProto.Tensorflow.Core.Util.TestLog
                              minProto.Tensorflow.Core.Framework.Summary
                              minConsumerProto.Tensorflow.Core.Framework.Versions
                              minimumProto.Tensorflow.Core.Framework.OpDef
                              modeProto.Tensorflow.Core.Util.TestLog
                              modelProto.Tensorflow.Core.Util.TestLog
                              msgProto.Tensorflow.Core.Util.Event
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html index 337139c..dde9eed 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - N)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - N

                              name 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              3 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
                              4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
                              5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
                              6 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              7 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              8 (Function)Proto.Tensorflow.Core.Framework.DeviceAttributes
                              9 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              10 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              11 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              12 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              13 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              14 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              NameAttrList 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              NameAttrList'AttrEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
                              NamedTensorProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              node 
                              1 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              2 (Function)Proto.Tensorflow.Core.Framework.Graph
                              NodeDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
                              nodeDefProto.Tensorflow.Core.Framework.Function
                              NodeDef'AttrEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
                              NodeExecStats 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              nodeListProto.Tensorflow.Core.Protobuf.MetaGraph
                              nodeName 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.Debug
                              2 (Function)Proto.Tensorflow.Core.Framework.Summary
                              3 (Function)Proto.Tensorflow.Core.Framework.StepStats
                              NodeOutput 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              nodeStatsProto.Tensorflow.Core.Framework.StepStats
                              NOT_FOUNDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              numProto.Tensorflow.Core.Framework.Summary
                              numberAttrProto.Tensorflow.Core.Framework.OpDef
                              numBytesProto.Tensorflow.Core.Framework.LogMemory
                              numChannelsProto.Tensorflow.Core.Framework.Summary
                              numCoresProto.Tensorflow.Core.Util.TestLog
                              numCoresAllowedProto.Tensorflow.Core.Util.TestLog
                              numReplicasProto.Tensorflow.Core.Protobuf.RewriterConfig
                              numShardsProto.Tensorflow.Core.Protobuf.TensorBundle
                              numThreadsProto.Tensorflow.Core.Protobuf.Config
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html index 950fd48..cbb351c 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - O)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - O

                              obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              op 
                              1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef'ArgDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef'AttrDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDeprecation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              OpList 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OptimizerOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
                              optLevelProto.Tensorflow.Core.Protobuf.Config
                              outputArgProto.Tensorflow.Core.Framework.OpDef
                              outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - O

                              obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              offset 
                              1 (Function)Proto.Tensorflow.Core.Util.MemmappedFileSystem
                              2 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              OKProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              op 
                              1 (Function)Proto.Tensorflow.Core.Framework.KernelDef
                              2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              3 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef'ArgDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDef'AttrDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              OpDeprecation 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              opEndRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              operationProto.Tensorflow.Core.Framework.LogMemory
                              operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              OpList 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
                              opStartRelMicrosProto.Tensorflow.Core.Framework.StepStats
                              OptimizerOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
                              OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
                              optimizersProto.Tensorflow.Core.Protobuf.RewriterConfig
                              optimizeTensorLayoutProto.Tensorflow.Core.Protobuf.RewriterConfig
                              optLevelProto.Tensorflow.Core.Protobuf.Config
                              optsProto.Tensorflow.Core.Util.TestLog
                              outputProto.Tensorflow.Core.Framework.StepStats
                              outputArgProto.Tensorflow.Core.Framework.OpDef
                              outputInfoProto.Tensorflow.Core.Framework.CostGraph
                              outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              outputsProto.Tensorflow.Core.Protobuf.MetaGraph
                              outputSlotProto.Tensorflow.Core.Protobuf.Debug
                              OUT_OF_RANGEProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html index c5d282f..c35ce87 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - P)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - P

                              parallelIterationsProto.Tensorflow.Core.Protobuf.ControlFlow
                              partitionGraphsProto.Tensorflow.Core.Protobuf.Config
                              peakBytesProto.Tensorflow.Core.Framework.StepStats
                              PERMISSION_DENIEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
                              physicalDescriptionProto.Tensorflow.Core.Util.TestLog
                              physicalDeviceDescProto.Tensorflow.Core.Framework.DeviceAttributes
                              pivotForBodyNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              pivotForPredNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              pivotNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              placeholderProto.Tensorflow.Core.Framework.AttrValue
                              placementPeriodProto.Tensorflow.Core.Protobuf.Config
                              placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
                              PlatformInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              platformInfoProto.Tensorflow.Core.Util.TestLog
                              pluginDataProto.Tensorflow.Core.Framework.Summary
                              pluginNameProto.Tensorflow.Core.Framework.Summary
                              pollingActiveDelayUsecsProto.Tensorflow.Core.Protobuf.Config
                              pollingInactiveDelayMsecsProto.Tensorflow.Core.Protobuf.Config
                              precedingNodeProto.Tensorflow.Core.Framework.CostGraph
                              precedingPortProto.Tensorflow.Core.Framework.CostGraph
                              predNameProto.Tensorflow.Core.Protobuf.ControlFlow
                              producerProto.Tensorflow.Core.Framework.Versions
                              protocolProto.Tensorflow.Core.Protobuf.TensorflowServer
                              ptr 
                              1 (Function)Proto.Tensorflow.Core.Framework.AllocationDescription
                              2 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-Q.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-Q.html new file mode 100644 index 0000000..af4b4d1 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-Q.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - Q)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html index dccaa44..dc05dbe 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - R)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - R

                              referencedTensorProto.Tensorflow.Core.Framework.StepStats
                              releaseProto.Tensorflow.Core.Util.TestLog
                              requestedBytesProto.Tensorflow.Core.Framework.AllocationDescription
                              ResourceHandleProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
                              resourceHandleValProto.Tensorflow.Core.Framework.Tensor
                              RESOURCE_EXHAUSTEDProto.Tensorflow.Core.Lib.Core.ErrorCodes
                              restoreOpNameProto.Tensorflow.Core.Protobuf.Saver
                              retProto.Tensorflow.Core.Framework.Function
                              rewriteOptionsProto.Tensorflow.Core.Protobuf.Config
                              RewriterConfig 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              RewriterConfig'HEURISTICSProto.Tensorflow.Core.Protobuf.RewriterConfig
                              RewriterConfig'MANUALProto.Tensorflow.Core.Protobuf.RewriterConfig
                              RewriterConfig'MemOptTypeProto.Tensorflow.Core.Protobuf.RewriterConfig
                              RewriterConfig'NO_MEM_OPTProto.Tensorflow.Core.Protobuf.RewriterConfig
                              RPCOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              rpcOptionsProto.Tensorflow.Core.Protobuf.Config
                              RunConfiguration 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              runConfigurationProto.Tensorflow.Core.Util.TestLog
                              RunMetadata 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              runMetadataProto.Tensorflow.Core.Util.Event
                              runModeProto.Tensorflow.Core.Util.TestLog
                              RunOptions 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
                              RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
                              runTimeProto.Tensorflow.Core.Util.TestLog
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html index 6cff79d..5c05ee5 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - S)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - S

                              sProto.Tensorflow.Core.Framework.AttrValue
                              sampleRateProto.Tensorflow.Core.Framework.Summary
                              scomplexValProto.Tensorflow.Core.Framework.Tensor
                              sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              SessionLog 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              sessionLogProto.Tensorflow.Core.Util.Event
                              SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
                              SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
                              SessionLog'STARTProto.Tensorflow.Core.Util.Event
                              SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
                              SessionLog'STOPProto.Tensorflow.Core.Util.Event
                              shapeProto.Tensorflow.Core.Framework.AttrValue
                              simpleValueProto.Tensorflow.Core.Framework.Summary
                              sizeProto.Tensorflow.Core.Framework.TensorShape
                              statusProto.Tensorflow.Core.Util.Event
                              stepProto.Tensorflow.Core.Util.Event
                              stepStatsProto.Tensorflow.Core.Protobuf.Config
                              stringValProto.Tensorflow.Core.Framework.Tensor
                              sumProto.Tensorflow.Core.Framework.Summary
                              Summary 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              summary 
                              1 (Function)Proto.Tensorflow.Core.Util.Event
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              Summary'Audio 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Image 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Value 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              SummaryDescription 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              sumSquaresProto.Tensorflow.Core.Framework.Summary
                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - S

                              sProto.Tensorflow.Core.Framework.AttrValue
                              sampleRateProto.Tensorflow.Core.Framework.Summary
                              SavedModel 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.SavedModel
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.SavedModel
                              savedModelSchemaVersionProto.Tensorflow.Core.Protobuf.SavedModel
                              SavedSlice 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              SavedSliceMeta 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              SavedTensorSliceMeta 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              SavedTensorSlices 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              SaverDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Saver
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Saver
                              saverDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              SaverDef'CheckpointFormatVersionProto.Tensorflow.Core.Protobuf.Saver
                              SaverDef'LEGACYProto.Tensorflow.Core.Protobuf.Saver
                              SaverDef'V1Proto.Tensorflow.Core.Protobuf.Saver
                              SaverDef'V2Proto.Tensorflow.Core.Protobuf.Saver
                              SaveSliceInfoDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Variable
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Variable
                              saveSliceInfoDefProto.Tensorflow.Core.Framework.Variable
                              saveTensorNameProto.Tensorflow.Core.Protobuf.Saver
                              scheduledMicrosProto.Tensorflow.Core.Framework.StepStats
                              scomplexValProto.Tensorflow.Core.Framework.Tensor
                              SequenceExample 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.Example
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.Example
                              serialIdentifierProto.Tensorflow.Core.Util.TestLog
                              ServerDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.TensorflowServer
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.TensorflowServer
                              sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
                              SessionLog 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              sessionLogProto.Tensorflow.Core.Util.Event
                              SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
                              SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
                              SessionLog'STARTProto.Tensorflow.Core.Util.Event
                              SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
                              SessionLog'STOPProto.Tensorflow.Core.Util.Event
                              shape 
                              1 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              5 (Function)Proto.Tensorflow.Core.Framework.TensorDescription
                              6 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              shapesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              shardedProto.Tensorflow.Core.Protobuf.Saver
                              shardIdProto.Tensorflow.Core.Protobuf.TensorBundle
                              signatureProto.Tensorflow.Core.Framework.Function
                              SignatureDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              signatureDefProto.Tensorflow.Core.Protobuf.MetaGraph
                              SignatureDef'InputsEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              SignatureDef'OutputsEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              simpleValueProto.Tensorflow.Core.Framework.Summary
                              size 
                              1 (Function)Proto.Tensorflow.Core.Framework.TensorShape
                              2 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              3 (Function)Proto.Tensorflow.Core.Framework.CostGraph
                              sliceProto.Tensorflow.Core.Util.SavedTensorSlice
                              slicesProto.Tensorflow.Core.Protobuf.TensorBundle
                              slotProto.Tensorflow.Core.Framework.StepStats
                              snapshotProto.Tensorflow.Core.Util.TestLog
                              snapshotNameProto.Tensorflow.Core.Framework.Variable
                              startProto.Tensorflow.Core.Framework.TensorSlice
                              startTimeProto.Tensorflow.Core.Util.TestLog
                              statusProto.Tensorflow.Core.Util.Event
                              stepProto.Tensorflow.Core.Util.Event
                              stepIdProto.Tensorflow.Core.Framework.LogMemory
                              StepStats 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.StepStats
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.StepStats
                              stepStatsProto.Tensorflow.Core.Protobuf.Config
                              stringValProto.Tensorflow.Core.Framework.Tensor
                              stringValueProto.Tensorflow.Core.Util.TestLog
                              strippedOpListProto.Tensorflow.Core.Protobuf.MetaGraph
                              sumProto.Tensorflow.Core.Framework.Summary
                              Summary 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              summary 
                              1 (Function)Proto.Tensorflow.Core.Util.Event
                              2 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              Summary'Audio 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Image 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Value 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              Summary'Value'AudioProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'HistoProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'ImageProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'ObsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'SimpleValueProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'TensorProto.Tensorflow.Core.Framework.Summary
                              Summary'Value'ValueProto.Tensorflow.Core.Framework.Summary
                              SummaryDescription 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              summaryDescriptionProto.Tensorflow.Core.Framework.Summary
                              SummaryMetadata 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              SummaryMetadata'PluginData 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
                              sumSquaresProto.Tensorflow.Core.Framework.Summary
                              swapMemoryProto.Tensorflow.Core.Protobuf.ControlFlow
                              systemProto.Tensorflow.Core.Util.TestLog
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html index 9984df7..c53b019 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - T)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - T

                              tag 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Util.Event
                              TaggedRunMetadata 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.Event
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
                              taggedRunMetadataProto.Tensorflow.Core.Util.Event
                              tagsProto.Tensorflow.Core.Protobuf.MetaGraph
                              targetProto.Tensorflow.Core.Util.TestLog
                              taskIndexProto.Tensorflow.Core.Protobuf.TensorflowServer
                              tasksProto.Tensorflow.Core.Protobuf.Cluster
                              temporaryMemorySizeProto.Tensorflow.Core.Framework.CostGraph
                              tensor 
                              1 (Function)Proto.Tensorflow.Core.Framework.Summary
                              2 (Function)Proto.Tensorflow.Core.Protobuf.NamedTensor
                              3 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              5 (Function)Proto.Tensorflow.Core.Framework.LogMemory
                              tensorContentProto.Tensorflow.Core.Framework.Tensor
                              TensorDescription 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorDescription
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorDescription
                              tensorDescriptionProto.Tensorflow.Core.Framework.StepStats
                              tensorflowGitVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              tensorflowVersionProto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              tensorInfoProto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo'CooSparse 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo'CooSparse'Proto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo'EncodingProto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorInfo'NameProto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
                              tensorShape 
                              1 (Function)Proto.Tensorflow.Core.Framework.Tensor
                              2 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              TensorShapeProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
                              TensorShapeProto'Dim 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
                              TensorSliceProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorSlice
                              TensorSliceProto'Extent 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorSlice
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorSlice
                              TensorSliceProto'Extent'HasLengthProto.Tensorflow.Core.Framework.TensorSlice
                              TensorSliceProto'Extent'LengthProto.Tensorflow.Core.Framework.TensorSlice
                              TestResults 
                              1 (Type/Class)Proto.Tensorflow.Core.Util.TestLog
                              2 (Data Constructor)Proto.Tensorflow.Core.Util.TestLog
                              TestResults'ANDROID_BENCHMARKProto.Tensorflow.Core.Util.TestLog
                              TestResults'BenchmarkTypeProto.Tensorflow.Core.Util.TestLog
                              TestResults'CPP_MICROBENCHMARKProto.Tensorflow.Core.Util.TestLog
                              TestResults'PYTHON_BENCHMARKProto.Tensorflow.Core.Util.TestLog
                              TestResults'UNKNOWNProto.Tensorflow.Core.Util.TestLog
                              threadIdProto.Tensorflow.Core.Framework.StepStats
                              ThreadPoolOptionProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
                              throughputProto.Tensorflow.Core.Util.TestLog
                              timelineLabelProto.Tensorflow.Core.Framework.StepStats
                              timelineStepProto.Tensorflow.Core.Protobuf.Config
                              timeoutInMsProto.Tensorflow.Core.Protobuf.Config
                              tolerateDebugOpCreationFailuresProto.Tensorflow.Core.Protobuf.Debug
                              totalProto.Tensorflow.Core.Util.TestLog
                              totalBytesProto.Tensorflow.Core.Framework.StepStats
                              traceLevelProto.Tensorflow.Core.Protobuf.Config
                              type' 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              4 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              typeAttrProto.Tensorflow.Core.Framework.OpDef
                              typeHintProto.Tensorflow.Core.Framework.Summary
                              typeListAttrProto.Tensorflow.Core.Framework.OpDef
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html index e03d8c5..e57d26a 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - U)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html index bbd3137..7692fca 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - V)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Index - V

                              value 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              3 (Function)Proto.Tensorflow.Core.Protobuf.Cluster
                              4 (Function)Proto.Tensorflow.Core.Framework.Summary
                              5 (Function)Proto.Tensorflow.Core.Framework.AttrValue
                              6 (Function)Proto.Tensorflow.Core.Framework.NodeDef
                              7 (Function)Proto.Tensorflow.Core.Framework.Function
                              8 (Function)Proto.Tensorflow.Core.Protobuf.MetaGraph
                              9 (Function)Proto.Tensorflow.Core.Protobuf.Config
                              10 (Function)Proto.Tensorflow.Core.Example.Feature
                              11 (Function)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              valuesProto.Tensorflow.Core.Protobuf.ControlFlow
                              ValuesDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              valuesDefProto.Tensorflow.Core.Protobuf.ControlFlow
                              ValuesDef'ExternalValuesEntry 
                              1 (Type/Class)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.ControlFlow
                              valuesOutputTensorNameProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              valuesTensorNameProto.Tensorflow.Core.Protobuf.MetaGraph
                              VariableDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Variable
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Variable
                              variableNameProto.Tensorflow.Core.Framework.Variable
                              varLenFeatureProto.Tensorflow.Core.Example.ExampleParserConfiguration
                              VarLenFeatureProto 
                              1 (Type/Class)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              2 (Data Constructor)Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              varOffsetProto.Tensorflow.Core.Framework.Variable
                              varShapeProto.Tensorflow.Core.Framework.Variable
                              version 
                              1 (Function)Proto.Tensorflow.Core.Util.TestLog
                              2 (Function)Proto.Tensorflow.Core.Protobuf.Saver
                              3 (Function)Proto.Tensorflow.Core.Protobuf.TensorBundle
                              4 (Function)Proto.Tensorflow.Core.Framework.OpDef
                              5 (Function)Proto.Tensorflow.Core.Framework.Graph
                              VersionDef 
                              1 (Type/Class)Proto.Tensorflow.Core.Framework.Versions
                              2 (Data Constructor)Proto.Tensorflow.Core.Framework.Versions
                              versionNumberProto.Tensorflow.Core.Framework.Tensor
                              versions 
                              1 (Function)Proto.Tensorflow.Core.Util.SavedTensorSlice
                              2 (Function)Proto.Tensorflow.Core.Framework.Graph
                              visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-W.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-W.html index 54d8f04..6d8c9a6 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-W.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-W.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - W)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html index b26c50a..7f68a9f 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index)

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/frames.html b/docs/haddock/tensorflow-proto-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-proto-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-proto-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-proto-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html deleted file mode 100644 index 84031bb..0000000 --- a/docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/index.html b/docs/haddock/tensorflow-proto-0.1.0.0/index.html index dccea21..03a2d23 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              \ No newline at end of file +

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

                              Please see README.md

                              Modules

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-Example.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-Example.html new file mode 100644 index 0000000..66cfc42 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-Example.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Example.Example

                              Proto.Tensorflow.Core.Example.Example

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-ExampleParserConfiguration.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-ExampleParserConfiguration.html new file mode 100644 index 0000000..1f0998a --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-ExampleParserConfiguration.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Example.ExampleParserConfiguration

                              Proto.Tensorflow.Core.Example.ExampleParserConfiguration

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-Feature.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-Feature.html new file mode 100644 index 0000000..e37e8a7 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Example-Feature.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Example.Feature

                              Proto.Tensorflow.Core.Example.Feature

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AllocationDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AllocationDescription.html new file mode 100644 index 0000000..507bc24 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AllocationDescription.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.AllocationDescription

                              Proto.Tensorflow.Core.Framework.AllocationDescription

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AttrValue.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AttrValue.html index a05d215..51c4292 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AttrValue.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AttrValue.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.AttrValue

                              Proto.Tensorflow.Core.Framework.AttrValue

                              \ No newline at end of file +

                              Proto.Tensorflow.Core.Framework.AttrValue

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-CostGraph.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-CostGraph.html new file mode 100644 index 0000000..7af2971 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-CostGraph.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.CostGraph

                              Proto.Tensorflow.Core.Framework.CostGraph

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-DeviceAttributes.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-DeviceAttributes.html new file mode 100644 index 0000000..8bb19f3 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-DeviceAttributes.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.DeviceAttributes

                              Proto.Tensorflow.Core.Framework.DeviceAttributes

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Function.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Function.html new file mode 100644 index 0000000..8c0fdb0 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Function.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Function

                              Proto.Tensorflow.Core.Framework.Function

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Graph.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Graph.html index 14f4318..da66506 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Graph.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Graph.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.Graph

                              Proto.Tensorflow.Core.Framework.Graph

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-KernelDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-KernelDef.html new file mode 100644 index 0000000..c651e23 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-KernelDef.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.KernelDef

                              Proto.Tensorflow.Core.Framework.KernelDef

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-LogMemory.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-LogMemory.html new file mode 100644 index 0000000..d5d5c52 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-LogMemory.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.LogMemory

                              Proto.Tensorflow.Core.Framework.LogMemory

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-NodeDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-NodeDef.html index 7d65c77..c153dce 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-NodeDef.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-NodeDef.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.NodeDef

                              Proto.Tensorflow.Core.Framework.NodeDef

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-OpDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-OpDef.html index 2cd795c..9fa0df7 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-OpDef.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-OpDef.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.OpDef

                              Proto.Tensorflow.Core.Framework.OpDef

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-ResourceHandle.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-ResourceHandle.html index 521fe5c..8b770bf 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-ResourceHandle.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-ResourceHandle.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.ResourceHandle

                              Proto.Tensorflow.Core.Framework.ResourceHandle

                              \ No newline at end of file +

                              Proto.Tensorflow.Core.Framework.ResourceHandle

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-StepStats.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-StepStats.html new file mode 100644 index 0000000..a1e87f2 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-StepStats.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.StepStats

                              Proto.Tensorflow.Core.Framework.StepStats

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Summary.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Summary.html index 5351bef..a74b3f2 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Summary.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Summary.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.Summary

                              Proto.Tensorflow.Core.Framework.Summary

                              \ No newline at end of file +

                              Proto.Tensorflow.Core.Framework.Summary

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Tensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Tensor.html index 275f22f..c92f6ad 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Tensor.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Tensor.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.Tensor

                              Proto.Tensorflow.Core.Framework.Tensor

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorDescription.html new file mode 100644 index 0000000..81a3c46 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorDescription.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.TensorDescription

                              Proto.Tensorflow.Core.Framework.TensorDescription

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorShape.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorShape.html index 29f6d63..f4ae6c9 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorShape.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorShape.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.TensorShape

                              Proto.Tensorflow.Core.Framework.TensorShape

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorSlice.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorSlice.html new file mode 100644 index 0000000..a9a3fd9 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorSlice.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.TensorSlice

                              Proto.Tensorflow.Core.Framework.TensorSlice

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Types.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Types.html index ab4e0a2..65f3258 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Types.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Types.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Framework.Types

                              Proto.Tensorflow.Core.Framework.Types

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Variable.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Variable.html new file mode 100644 index 0000000..3bffb44 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Variable.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Variable

                              Proto.Tensorflow.Core.Framework.Variable

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Versions.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Versions.html new file mode 100644 index 0000000..6d21b31 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Versions.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Versions

                              Proto.Tensorflow.Core.Framework.Versions

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Lib-Core-ErrorCodes.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Lib-Core-ErrorCodes.html new file mode 100644 index 0000000..cd98b2c --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Lib-Core-ErrorCodes.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Lib.Core.ErrorCodes

                              Proto.Tensorflow.Core.Lib.Core.ErrorCodes

                              data Code

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Cluster.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Cluster.html new file mode 100644 index 0000000..e6cc135 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Cluster.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.Cluster

                              Proto.Tensorflow.Core.Protobuf.Cluster

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html index 4626817..7f29787 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Protobuf.Config

                              Proto.Tensorflow.Core.Protobuf.Config

                              \ No newline at end of file +

                              Proto.Tensorflow.Core.Protobuf.Config

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-ControlFlow.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-ControlFlow.html new file mode 100644 index 0000000..2552eab --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-ControlFlow.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.ControlFlow

                              Proto.Tensorflow.Core.Protobuf.ControlFlow

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Debug.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Debug.html new file mode 100644 index 0000000..07f79c7 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Debug.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.Debug

                              Proto.Tensorflow.Core.Protobuf.Debug

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-MetaGraph.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-MetaGraph.html new file mode 100644 index 0000000..8cbb04d --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-MetaGraph.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.MetaGraph

                              Proto.Tensorflow.Core.Protobuf.MetaGraph

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-NamedTensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-NamedTensor.html new file mode 100644 index 0000000..003c54a --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-NamedTensor.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.NamedTensor

                              Proto.Tensorflow.Core.Protobuf.NamedTensor

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-QueueRunner.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-QueueRunner.html new file mode 100644 index 0000000..a868fc3 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-QueueRunner.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.QueueRunner

                              Proto.Tensorflow.Core.Protobuf.QueueRunner

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-RewriterConfig.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-RewriterConfig.html new file mode 100644 index 0000000..1148401 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-RewriterConfig.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.RewriterConfig

                              Proto.Tensorflow.Core.Protobuf.RewriterConfig

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-SavedModel.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-SavedModel.html new file mode 100644 index 0000000..581bf24 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-SavedModel.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.SavedModel

                              Proto.Tensorflow.Core.Protobuf.SavedModel

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Saver.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Saver.html new file mode 100644 index 0000000..0d07782 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Saver.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.Saver

                              Proto.Tensorflow.Core.Protobuf.Saver

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-TensorBundle.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-TensorBundle.html new file mode 100644 index 0000000..2ec21dd --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-TensorBundle.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.TensorBundle

                              Proto.Tensorflow.Core.Protobuf.TensorBundle

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-TensorflowServer.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-TensorflowServer.html new file mode 100644 index 0000000..d925fd0 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-TensorflowServer.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.TensorflowServer

                              Proto.Tensorflow.Core.Protobuf.TensorflowServer

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-Event.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-Event.html index 4af986d..dceedee 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-Event.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-Event.html @@ -1,4 +1,4 @@ -Proto.Tensorflow.Core.Util.Event

                              Proto.Tensorflow.Core.Util.Event

                              \ No newline at end of file +

                              Proto.Tensorflow.Core.Util.Event

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-MemmappedFileSystem.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-MemmappedFileSystem.html new file mode 100644 index 0000000..f5bfb72 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-MemmappedFileSystem.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Util.MemmappedFileSystem

                              Proto.Tensorflow.Core.Util.MemmappedFileSystem

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-SavedTensorSlice.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-SavedTensorSlice.html new file mode 100644 index 0000000..aa2713b --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-SavedTensorSlice.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Util.SavedTensorSlice

                              Proto.Tensorflow.Core.Util.SavedTensorSlice

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-TestLog.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-TestLog.html new file mode 100644 index 0000000..a3a8a7b --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-TestLog.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Util.TestLog

                              Proto.Tensorflow.Core.Util.TestLog

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/ocean.css b/docs/haddock/tensorflow-proto-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-proto-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ -424,30 
+435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.Example.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.Example.html new file mode 100644 index 0000000..cddd189 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.Example.html @@ -0,0 +1,214 @@ +
                              {- This file was auto-generated from tensorflow/core/example/example.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Example.Example where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Example.Feature
                              +
                              +data Example = Example{_Example'features ::
                              +                       !(Prelude.Maybe Proto.Tensorflow.Core.Example.Feature.Features)}
                              +             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Example.Feature.Features,
                              +          b ~ Proto.Tensorflow.Core.Example.Feature.Features,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "features" f Example Example a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Example'features
                              +                 (\ x__ y__ -> x__{_Example'features = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Example.Feature.Features,
                              +          b ~ Prelude.Maybe Proto.Tensorflow.Core.Example.Feature.Features,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'features" f Example Example a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Example'features
                              +                 (\ x__ y__ -> x__{_Example'features = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default Example where
                              +        def = Example{_Example'features = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message Example where
                              +        descriptor
                              +          = let features__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "features"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Example.Feature.Features)
                              +                      (Data.ProtoLens.OptionalField maybe'features)
                              +                      :: Data.ProtoLens.FieldDescriptor Example
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Example")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, features__field_descriptor)])
                              +                (Data.Map.fromList [("features", features__field_descriptor)])
                              +
                              +data SequenceExample = SequenceExample{_SequenceExample'context ::
                              +                                       !(Prelude.Maybe
                              +                                           Proto.Tensorflow.Core.Example.Feature.Features),
                              +                                       _SequenceExample'featureLists ::
                              +                                       !(Prelude.Maybe
                              +                                           Proto.Tensorflow.Core.Example.Feature.FeatureLists)}
                              +                     deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Example.Feature.Features,
                              +          b ~ Proto.Tensorflow.Core.Example.Feature.Features,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "context" f SequenceExample SequenceExample a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SequenceExample'context
                              +                 (\ x__ y__ -> x__{_SequenceExample'context = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Example.Feature.Features,
                              +          b ~ Prelude.Maybe Proto.Tensorflow.Core.Example.Feature.Features,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'context" f SequenceExample
                              +           SequenceExample
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SequenceExample'context
                              +                 (\ x__ y__ -> x__{_SequenceExample'context = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Example.Feature.FeatureLists,
                              +          b ~ Proto.Tensorflow.Core.Example.Feature.FeatureLists,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "featureLists" f SequenceExample
                              +           SequenceExample
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SequenceExample'featureLists
                              +                 (\ x__ y__ -> x__{_SequenceExample'featureLists = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Example.Feature.FeatureLists,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Example.Feature.FeatureLists,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'featureLists" f SequenceExample
                              +           SequenceExample
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SequenceExample'featureLists
                              +                 (\ x__ y__ -> x__{_SequenceExample'featureLists = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SequenceExample where
                              +        def
                              +          = SequenceExample{_SequenceExample'context = Prelude.Nothing,
                              +                            _SequenceExample'featureLists = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message SequenceExample where
                              +        descriptor
                              +          = let context__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "context"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Example.Feature.Features)
                              +                      (Data.ProtoLens.OptionalField maybe'context)
                              +                      :: Data.ProtoLens.FieldDescriptor SequenceExample
                              +                featureLists__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "feature_lists"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Example.Feature.FeatureLists)
                              +                      (Data.ProtoLens.OptionalField maybe'featureLists)
                              +                      :: Data.ProtoLens.FieldDescriptor SequenceExample
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SequenceExample")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, context__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, featureLists__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("context", context__field_descriptor),
                              +                    ("feature_lists", featureLists__field_descriptor)])
                              +
                              +context ::
                              +        forall f s t a b . (Lens.Labels.HasLens "context" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +context
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "context")
                              +
                              +featureLists ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "featureLists" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +featureLists
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "featureLists")
                              +
                              +features ::
                              +         forall f s t a b . (Lens.Labels.HasLens "features" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +features
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "features")
                              +
                              +maybe'context ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'context" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'context
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'context")
                              +
                              +maybe'featureLists ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "maybe'featureLists" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +maybe'featureLists
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'featureLists")
                              +
                              +maybe'features ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'features" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'features
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'features")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.ExampleParserConfiguration.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.ExampleParserConfiguration.html new file mode 100644 index 0000000..f53b691 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.ExampleParserConfiguration.html @@ -0,0 +1,720 @@ +
                              {- This file was auto-generated from tensorflow/core/example/example_parser_configuration.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Example.ExampleParserConfiguration
                              +       where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.Tensor
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +
                              +data ExampleParserConfiguration = ExampleParserConfiguration{_ExampleParserConfiguration'featureMap
                              +                                                             ::
                              +                                                             !(Data.Map.Map Data.Text.Text
                              +                                                                 FeatureConfiguration)}
                              +                                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text FeatureConfiguration,
                              +          b ~ Data.Map.Map Data.Text.Text FeatureConfiguration,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "featureMap" f ExampleParserConfiguration
                              +           ExampleParserConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ExampleParserConfiguration'featureMap
                              +                 (\ x__ y__ -> x__{_ExampleParserConfiguration'featureMap = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ExampleParserConfiguration
                              +         where
                              +        def
                              +          = ExampleParserConfiguration{_ExampleParserConfiguration'featureMap
                              +                                         = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message ExampleParserConfiguration where
                              +        descriptor
                              +          = let featureMap__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "feature_map"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           ExampleParserConfiguration'FeatureMapEntry)
                              +                      (Data.ProtoLens.MapField key value featureMap)
                              +                      :: Data.ProtoLens.FieldDescriptor ExampleParserConfiguration
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ExampleParserConfiguration")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, featureMap__field_descriptor)])
                              +                (Data.Map.fromList [("feature_map", featureMap__field_descriptor)])
                              +
                              +data ExampleParserConfiguration'FeatureMapEntry = ExampleParserConfiguration'FeatureMapEntry{_ExampleParserConfiguration'FeatureMapEntry'key
                              +                                                                                             ::
                              +                                                                                             !Data.Text.Text,
                              +                                                                                             _ExampleParserConfiguration'FeatureMapEntry'value
                              +                                                                                             ::
                              +                                                                                             !(Prelude.Maybe
                              +                                                                                                 FeatureConfiguration)}
                              +                                                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f
                              +           ExampleParserConfiguration'FeatureMapEntry
                              +           ExampleParserConfiguration'FeatureMapEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _ExampleParserConfiguration'FeatureMapEntry'key
                              +                 (\ x__ y__ ->
                              +                    x__{_ExampleParserConfiguration'FeatureMapEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ FeatureConfiguration, b ~ FeatureConfiguration,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f
                              +           ExampleParserConfiguration'FeatureMapEntry
                              +           ExampleParserConfiguration'FeatureMapEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _ExampleParserConfiguration'FeatureMapEntry'value
                              +                 (\ x__ y__ ->
                              +                    x__{_ExampleParserConfiguration'FeatureMapEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe FeatureConfiguration,
                              +          b ~ Prelude.Maybe FeatureConfiguration, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f
                              +           ExampleParserConfiguration'FeatureMapEntry
                              +           ExampleParserConfiguration'FeatureMapEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _ExampleParserConfiguration'FeatureMapEntry'value
                              +                 (\ x__ y__ ->
                              +                    x__{_ExampleParserConfiguration'FeatureMapEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default
                              +           ExampleParserConfiguration'FeatureMapEntry
                              +         where
                              +        def
                              +          = ExampleParserConfiguration'FeatureMapEntry{_ExampleParserConfiguration'FeatureMapEntry'key
                              +                                                         = Data.ProtoLens.fieldDefault,
                              +                                                       _ExampleParserConfiguration'FeatureMapEntry'value
                              +                                                         = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message
                              +           ExampleParserConfiguration'FeatureMapEntry
                              +         where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      ::
                              +                      Data.ProtoLens.FieldDescriptor
                              +                        ExampleParserConfiguration'FeatureMapEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor FeatureConfiguration)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      ::
                              +                      Data.ProtoLens.FieldDescriptor
                              +                        ExampleParserConfiguration'FeatureMapEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack
                              +                   "tensorflow.ExampleParserConfiguration.FeatureMapEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data FeatureConfiguration = FeatureConfiguration{_FeatureConfiguration'config
                              +                                                 :: !(Prelude.Maybe FeatureConfiguration'Config)}
                              +                          deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data FeatureConfiguration'Config = FeatureConfiguration'FixedLenFeature !FixedLenFeatureProto
                              +                                 | FeatureConfiguration'VarLenFeature !VarLenFeatureProto
                              +                                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Maybe FeatureConfiguration'Config,
                              +          b ~ Prelude.Maybe FeatureConfiguration'Config,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'config" f FeatureConfiguration
                              +           FeatureConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureConfiguration'config
                              +                 (\ x__ y__ -> x__{_FeatureConfiguration'config = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe FixedLenFeatureProto,
                              +          b ~ Prelude.Maybe FixedLenFeatureProto, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'fixedLenFeature" f FeatureConfiguration
                              +           FeatureConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureConfiguration'config
                              +                 (\ x__ y__ -> x__{_FeatureConfiguration'config = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just
                              +                          (FeatureConfiguration'FixedLenFeature x__val) -> Prelude.Just
                              +                                                                             x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap FeatureConfiguration'FixedLenFeature y__))
                              +
                              +instance (a ~ FixedLenFeatureProto, b ~ FixedLenFeatureProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "fixedLenFeature" f FeatureConfiguration
                              +           FeatureConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureConfiguration'config
                              +                 (\ x__ y__ -> x__{_FeatureConfiguration'config = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just
                              +                             (FeatureConfiguration'FixedLenFeature x__val) -> Prelude.Just
                              +                                                                                x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap FeatureConfiguration'FixedLenFeature y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe VarLenFeatureProto,
                              +          b ~ Prelude.Maybe VarLenFeatureProto, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'varLenFeature" f FeatureConfiguration
                              +           FeatureConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureConfiguration'config
                              +                 (\ x__ y__ -> x__{_FeatureConfiguration'config = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just
                              +                          (FeatureConfiguration'VarLenFeature x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap FeatureConfiguration'VarLenFeature y__))
                              +
                              +instance (a ~ VarLenFeatureProto, b ~ VarLenFeatureProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "varLenFeature" f FeatureConfiguration
                              +           FeatureConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureConfiguration'config
                              +                 (\ x__ y__ -> x__{_FeatureConfiguration'config = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just
                              +                             (FeatureConfiguration'VarLenFeature x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap FeatureConfiguration'VarLenFeature y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance Data.Default.Class.Default FeatureConfiguration where
                              +        def
                              +          = FeatureConfiguration{_FeatureConfiguration'config =
                              +                                   Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message FeatureConfiguration where
                              +        descriptor
                              +          = let fixedLenFeature__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "fixed_len_feature"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor FixedLenFeatureProto)
                              +                      (Data.ProtoLens.OptionalField maybe'fixedLenFeature)
                              +                      :: Data.ProtoLens.FieldDescriptor FeatureConfiguration
                              +                varLenFeature__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "var_len_feature"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor VarLenFeatureProto)
                              +                      (Data.ProtoLens.OptionalField maybe'varLenFeature)
                              +                      :: Data.ProtoLens.FieldDescriptor FeatureConfiguration
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FeatureConfiguration")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, fixedLenFeature__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, varLenFeature__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("fixed_len_feature", fixedLenFeature__field_descriptor),
                              +                    ("var_len_feature", varLenFeature__field_descriptor)])
                              +
                              +data FixedLenFeatureProto = FixedLenFeatureProto{_FixedLenFeatureProto'dtype
                              +                                                 :: !Proto.Tensorflow.Core.Framework.Types.DataType,
                              +                                                 _FixedLenFeatureProto'shape ::
                              +                                                 !(Prelude.Maybe
                              +                                                     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto),
                              +                                                 _FixedLenFeatureProto'defaultValue ::
                              +                                                 !(Prelude.Maybe
                              +                                                     Proto.Tensorflow.Core.Framework.Tensor.TensorProto),
                              +                                                 _FixedLenFeatureProto'valuesOutputTensorName ::
                              +                                                 !Data.Text.Text}
                              +                          deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dtype" f FixedLenFeatureProto
                              +           FixedLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FixedLenFeatureProto'dtype
                              +                 (\ x__ y__ -> x__{_FixedLenFeatureProto'dtype = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shape" f FixedLenFeatureProto
                              +           FixedLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FixedLenFeatureProto'shape
                              +                 (\ x__ y__ -> x__{_FixedLenFeatureProto'shape = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'shape" f FixedLenFeatureProto
                              +           FixedLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FixedLenFeatureProto'shape
                              +                 (\ x__ y__ -> x__{_FixedLenFeatureProto'shape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "defaultValue" f FixedLenFeatureProto
                              +           FixedLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FixedLenFeatureProto'defaultValue
                              +                 (\ x__ y__ -> x__{_FixedLenFeatureProto'defaultValue = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'defaultValue" f FixedLenFeatureProto
                              +           FixedLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FixedLenFeatureProto'defaultValue
                              +                 (\ x__ y__ -> x__{_FixedLenFeatureProto'defaultValue = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "valuesOutputTensorName" f FixedLenFeatureProto
                              +           FixedLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _FixedLenFeatureProto'valuesOutputTensorName
                              +                 (\ x__ y__ ->
                              +                    x__{_FixedLenFeatureProto'valuesOutputTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FixedLenFeatureProto where
                              +        def
                              +          = FixedLenFeatureProto{_FixedLenFeatureProto'dtype =
                              +                                   Data.Default.Class.def,
                              +                                 _FixedLenFeatureProto'shape = Prelude.Nothing,
                              +                                 _FixedLenFeatureProto'defaultValue = Prelude.Nothing,
                              +                                 _FixedLenFeatureProto'valuesOutputTensorName =
                              +                                   Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message FixedLenFeatureProto where
                              +        descriptor
                              +          = let dtype__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dtype"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
                              +                      :: Data.ProtoLens.FieldDescriptor FixedLenFeatureProto
                              +                shape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.OptionalField maybe'shape)
                              +                      :: Data.ProtoLens.FieldDescriptor FixedLenFeatureProto
                              +                defaultValue__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "default_value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
                              +                      (Data.ProtoLens.OptionalField maybe'defaultValue)
                              +                      :: Data.ProtoLens.FieldDescriptor FixedLenFeatureProto
                              +                valuesOutputTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "values_output_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         valuesOutputTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor FixedLenFeatureProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FixedLenFeatureProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, shape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, defaultValue__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, valuesOutputTensorName__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("dtype", dtype__field_descriptor),
                              +                    ("shape", shape__field_descriptor),
                              +                    ("default_value", defaultValue__field_descriptor),
                              +                    ("values_output_tensor_name",
                              +                     valuesOutputTensorName__field_descriptor)])
                              +
                              +data VarLenFeatureProto = VarLenFeatureProto{_VarLenFeatureProto'dtype
                              +                                             :: !Proto.Tensorflow.Core.Framework.Types.DataType,
                              +                                             _VarLenFeatureProto'valuesOutputTensorName ::
                              +                                             !Data.Text.Text,
                              +                                             _VarLenFeatureProto'indicesOutputTensorName ::
                              +                                             !Data.Text.Text,
                              +                                             _VarLenFeatureProto'shapesOutputTensorName ::
                              +                                             !Data.Text.Text}
                              +                        deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dtype" f VarLenFeatureProto VarLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VarLenFeatureProto'dtype
                              +                 (\ x__ y__ -> x__{_VarLenFeatureProto'dtype = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "valuesOutputTensorName" f VarLenFeatureProto
                              +           VarLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _VarLenFeatureProto'valuesOutputTensorName
                              +                 (\ x__ y__ ->
                              +                    x__{_VarLenFeatureProto'valuesOutputTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "indicesOutputTensorName" f VarLenFeatureProto
                              +           VarLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _VarLenFeatureProto'indicesOutputTensorName
                              +                 (\ x__ y__ ->
                              +                    x__{_VarLenFeatureProto'indicesOutputTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shapesOutputTensorName" f VarLenFeatureProto
                              +           VarLenFeatureProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _VarLenFeatureProto'shapesOutputTensorName
                              +                 (\ x__ y__ ->
                              +                    x__{_VarLenFeatureProto'shapesOutputTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default VarLenFeatureProto where
                              +        def
                              +          = VarLenFeatureProto{_VarLenFeatureProto'dtype =
                              +                                 Data.Default.Class.def,
                              +                               _VarLenFeatureProto'valuesOutputTensorName =
                              +                                 Data.ProtoLens.fieldDefault,
                              +                               _VarLenFeatureProto'indicesOutputTensorName =
                              +                                 Data.ProtoLens.fieldDefault,
                              +                               _VarLenFeatureProto'shapesOutputTensorName =
                              +                                 Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message VarLenFeatureProto where
                              +        descriptor
                              +          = let dtype__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dtype"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
                              +                      :: Data.ProtoLens.FieldDescriptor VarLenFeatureProto
                              +                valuesOutputTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "values_output_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         valuesOutputTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor VarLenFeatureProto
                              +                indicesOutputTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "indices_output_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         indicesOutputTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor VarLenFeatureProto
                              +                shapesOutputTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shapes_output_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         shapesOutputTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor VarLenFeatureProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.VarLenFeatureProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, valuesOutputTensorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, indicesOutputTensorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, shapesOutputTensorName__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("dtype", dtype__field_descriptor),
                              +                    ("values_output_tensor_name",
                              +                     valuesOutputTensorName__field_descriptor),
                              +                    ("indices_output_tensor_name",
                              +                     indicesOutputTensorName__field_descriptor),
                              +                    ("shapes_output_tensor_name",
                              +                     shapesOutputTensorName__field_descriptor)])
                              +
                              +defaultValue ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "defaultValue" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +defaultValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "defaultValue")
                              +
                              +dtype ::
                              +      forall f s t a b . (Lens.Labels.HasLens "dtype" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +dtype
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "dtype")
                              +
                              +featureMap ::
                              +           forall f s t a b . (Lens.Labels.HasLens "featureMap" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +featureMap
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "featureMap")
                              +
                              +fixedLenFeature ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "fixedLenFeature" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +fixedLenFeature
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "fixedLenFeature")
                              +
                              +indicesOutputTensorName ::
                              +                        forall f s t a b .
                              +                          (Lens.Labels.HasLens "indicesOutputTensorName" f s t a b) =>
                              +                          Lens.Family2.LensLike f s t a b
                              +indicesOutputTensorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "indicesOutputTensorName")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +maybe'config ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "maybe'config" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +maybe'config
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'config")
                              +
                              +maybe'defaultValue ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "maybe'defaultValue" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +maybe'defaultValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'defaultValue")
                              +
                              +maybe'fixedLenFeature ::
                              +                      forall f s t a b .
                              +                        (Lens.Labels.HasLens "maybe'fixedLenFeature" f s t a b) =>
                              +                        Lens.Family2.LensLike f s t a b
                              +maybe'fixedLenFeature
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'fixedLenFeature")
                              +
                              +maybe'shape ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'shape" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'shape")
                              +
                              +maybe'value ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'value" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'value")
                              +
                              +maybe'varLenFeature ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "maybe'varLenFeature" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +maybe'varLenFeature
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'varLenFeature")
                              +
                              +shape ::
                              +      forall f s t a b . (Lens.Labels.HasLens "shape" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "shape")
                              +
                              +shapesOutputTensorName ::
                              +                       forall f s t a b .
                              +                         (Lens.Labels.HasLens "shapesOutputTensorName" f s t a b) =>
                              +                         Lens.Family2.LensLike f s t a b
                              +shapesOutputTensorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "shapesOutputTensorName")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              +
                              +valuesOutputTensorName ::
                              +                       forall f s t a b .
                              +                         (Lens.Labels.HasLens "valuesOutputTensorName" f s t a b) =>
                              +                         Lens.Family2.LensLike f s t a b
                              +valuesOutputTensorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "valuesOutputTensorName")
                              +
                              +varLenFeature ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "varLenFeature" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +varLenFeature
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "varLenFeature")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.Feature.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.Feature.html new file mode 100644 index 0000000..d4e3a75 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Example.Feature.html @@ -0,0 +1,598 @@ +
                              {- This file was auto-generated from tensorflow/core/example/feature.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Example.Feature where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data BytesList = BytesList{_BytesList'value ::
                              +                           ![Data.ByteString.ByteString]}
                              +               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Data.ByteString.ByteString],
                              +          b ~ [Data.ByteString.ByteString], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f BytesList BytesList a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BytesList'value
                              +                 (\ x__ y__ -> x__{_BytesList'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default BytesList where
                              +        def = BytesList{_BytesList'value = []}
                              +
                              +instance Data.ProtoLens.Message BytesList where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked value)
                              +                      :: Data.ProtoLens.FieldDescriptor BytesList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.BytesList")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +data Feature = Feature{_Feature'kind ::
                              +                       !(Prelude.Maybe Feature'Kind)}
                              +             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data Feature'Kind = Feature'BytesList !BytesList
                              +                  | Feature'FloatList !FloatList
                              +                  | Feature'Int64List !Int64List
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Maybe Feature'Kind,
                              +          b ~ Prelude.Maybe Feature'Kind, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'kind" f Feature Feature a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Feature'kind
                              +                 (\ x__ y__ -> x__{_Feature'kind = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe BytesList, b ~ Prelude.Maybe BytesList,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'bytesList" f Feature Feature a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Feature'kind
                              +                 (\ x__ y__ -> x__{_Feature'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Feature'BytesList x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Feature'BytesList y__))
                              +
                              +instance (a ~ BytesList, b ~ BytesList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "bytesList" f Feature Feature a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Feature'kind
                              +                 (\ x__ y__ -> x__{_Feature'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Feature'BytesList x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Feature'BytesList y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe FloatList, b ~ Prelude.Maybe FloatList,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'floatList" f Feature Feature a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Feature'kind
                              +                 (\ x__ y__ -> x__{_Feature'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Feature'FloatList x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Feature'FloatList y__))
                              +
                              +instance (a ~ FloatList, b ~ FloatList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "floatList" f Feature Feature a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Feature'kind
                              +                 (\ x__ y__ -> x__{_Feature'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Feature'FloatList x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Feature'FloatList y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe Int64List, b ~ Prelude.Maybe Int64List,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'int64List" f Feature Feature a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Feature'kind
                              +                 (\ x__ y__ -> x__{_Feature'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Feature'Int64List x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Feature'Int64List y__))
                              +
                              +instance (a ~ Int64List, b ~ Int64List, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "int64List" f Feature Feature a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Feature'kind
                              +                 (\ x__ y__ -> x__{_Feature'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Feature'Int64List x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Feature'Int64List y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance Data.Default.Class.Default Feature where
                              +        def = Feature{_Feature'kind = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message Feature where
                              +        descriptor
                              +          = let bytesList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bytes_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor BytesList)
                              +                      (Data.ProtoLens.OptionalField maybe'bytesList)
                              +                      :: Data.ProtoLens.FieldDescriptor Feature
                              +                floatList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "float_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor FloatList)
                              +                      (Data.ProtoLens.OptionalField maybe'floatList)
                              +                      :: Data.ProtoLens.FieldDescriptor Feature
                              +                int64List__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "int64_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Int64List)
                              +                      (Data.ProtoLens.OptionalField maybe'int64List)
                              +                      :: Data.ProtoLens.FieldDescriptor Feature
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Feature")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, bytesList__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, floatList__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, int64List__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("bytes_list", bytesList__field_descriptor),
                              +                    ("float_list", floatList__field_descriptor),
                              +                    ("int64_list", int64List__field_descriptor)])
                              +
                              +data FeatureList = FeatureList{_FeatureList'feature :: ![Feature]}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Feature], b ~ [Feature], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "feature" f FeatureList FeatureList a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureList'feature
                              +                 (\ x__ y__ -> x__{_FeatureList'feature = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FeatureList where
                              +        def = FeatureList{_FeatureList'feature = []}
                              +
                              +instance Data.ProtoLens.Message FeatureList where
                              +        descriptor
                              +          = let feature__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "feature"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Feature)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked feature)
                              +                      :: Data.ProtoLens.FieldDescriptor FeatureList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FeatureList")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, feature__field_descriptor)])
                              +                (Data.Map.fromList [("feature", feature__field_descriptor)])
                              +
                              +data FeatureLists = FeatureLists{_FeatureLists'featureList ::
                              +                                 !(Data.Map.Map Data.Text.Text FeatureList)}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text FeatureList,
                              +          b ~ Data.Map.Map Data.Text.Text FeatureList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "featureList" f FeatureLists FeatureLists a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureLists'featureList
                              +                 (\ x__ y__ -> x__{_FeatureLists'featureList = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FeatureLists where
                              +        def = FeatureLists{_FeatureLists'featureList = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message FeatureLists where
                              +        descriptor
                              +          = let featureList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "feature_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor FeatureLists'FeatureListEntry)
                              +                      (Data.ProtoLens.MapField key value featureList)
                              +                      :: Data.ProtoLens.FieldDescriptor FeatureLists
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FeatureLists")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, featureList__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("feature_list", featureList__field_descriptor)])
                              +
                              +data FeatureLists'FeatureListEntry = FeatureLists'FeatureListEntry{_FeatureLists'FeatureListEntry'key
                              +                                                                   :: !Data.Text.Text,
                              +                                                                   _FeatureLists'FeatureListEntry'value
                              +                                                                   :: !(Prelude.Maybe FeatureList)}
                              +                                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f FeatureLists'FeatureListEntry
                              +           FeatureLists'FeatureListEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureLists'FeatureListEntry'key
                              +                 (\ x__ y__ -> x__{_FeatureLists'FeatureListEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ FeatureList, b ~ FeatureList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f FeatureLists'FeatureListEntry
                              +           FeatureLists'FeatureListEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureLists'FeatureListEntry'value
                              +                 (\ x__ y__ -> x__{_FeatureLists'FeatureListEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe FeatureList,
                              +          b ~ Prelude.Maybe FeatureList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f FeatureLists'FeatureListEntry
                              +           FeatureLists'FeatureListEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FeatureLists'FeatureListEntry'value
                              +                 (\ x__ y__ -> x__{_FeatureLists'FeatureListEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FeatureLists'FeatureListEntry
                              +         where
                              +        def
                              +          = FeatureLists'FeatureListEntry{_FeatureLists'FeatureListEntry'key
                              +                                            = Data.ProtoLens.fieldDefault,
                              +                                          _FeatureLists'FeatureListEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message FeatureLists'FeatureListEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor FeatureLists'FeatureListEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor FeatureList)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor FeatureLists'FeatureListEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FeatureLists.FeatureListEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data Features = Features{_Features'feature ::
                              +                         !(Data.Map.Map Data.Text.Text Feature)}
                              +              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text Feature,
                              +          b ~ Data.Map.Map Data.Text.Text Feature, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "feature" f Features Features a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Features'feature
                              +                 (\ x__ y__ -> x__{_Features'feature = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default Features where
                              +        def = Features{_Features'feature = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message Features where
                              +        descriptor
                              +          = let feature__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "feature"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Features'FeatureEntry)
                              +                      (Data.ProtoLens.MapField key value feature)
                              +                      :: Data.ProtoLens.FieldDescriptor Features
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Features")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, feature__field_descriptor)])
                              +                (Data.Map.fromList [("feature", feature__field_descriptor)])
                              +
                              +data Features'FeatureEntry = Features'FeatureEntry{_Features'FeatureEntry'key
                              +                                                   :: !Data.Text.Text,
                              +                                                   _Features'FeatureEntry'value ::
                              +                                                   !(Prelude.Maybe Feature)}
                              +                           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f Features'FeatureEntry
                              +           Features'FeatureEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Features'FeatureEntry'key
                              +                 (\ x__ y__ -> x__{_Features'FeatureEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Feature, b ~ Feature, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f Features'FeatureEntry
                              +           Features'FeatureEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Features'FeatureEntry'value
                              +                 (\ x__ y__ -> x__{_Features'FeatureEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe Feature, b ~ Prelude.Maybe Feature,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f Features'FeatureEntry
                              +           Features'FeatureEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Features'FeatureEntry'value
                              +                 (\ x__ y__ -> x__{_Features'FeatureEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default Features'FeatureEntry where
                              +        def
                              +          = Features'FeatureEntry{_Features'FeatureEntry'key =
                              +                                    Data.ProtoLens.fieldDefault,
                              +                                  _Features'FeatureEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message Features'FeatureEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor Features'FeatureEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Feature)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor Features'FeatureEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Features.FeatureEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data FloatList = FloatList{_FloatList'value :: ![Prelude.Float]}
                              +               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Prelude.Float], b ~ [Prelude.Float],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f FloatList FloatList a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FloatList'value
                              +                 (\ x__ y__ -> x__{_FloatList'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FloatList where
                              +        def = FloatList{_FloatList'value = []}
                              +
                              +instance Data.ProtoLens.Message FloatList where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed value)
                              +                      :: Data.ProtoLens.FieldDescriptor FloatList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FloatList")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +data Int64List = Int64List{_Int64List'value :: ![Data.Int.Int64]}
                              +               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f Int64List Int64List a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Int64List'value
                              +                 (\ x__ y__ -> x__{_Int64List'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default Int64List where
                              +        def = Int64List{_Int64List'value = []}
                              +
                              +instance Data.ProtoLens.Message Int64List where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed value)
                              +                      :: Data.ProtoLens.FieldDescriptor Int64List
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Int64List")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +bytesList ::
                              +          forall f s t a b . (Lens.Labels.HasLens "bytesList" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +bytesList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "bytesList")
                              +
                              +feature ::
                              +        forall f s t a b . (Lens.Labels.HasLens "feature" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +feature
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "feature")
                              +
                              +featureList ::
                              +            forall f s t a b . (Lens.Labels.HasLens "featureList" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +featureList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "featureList")
                              +
                              +floatList ::
                              +          forall f s t a b . (Lens.Labels.HasLens "floatList" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +floatList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "floatList")
                              +
                              +int64List ::
                              +          forall f s t a b . (Lens.Labels.HasLens "int64List" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +int64List
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "int64List")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +maybe'bytesList ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'bytesList" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'bytesList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'bytesList")
                              +
                              +maybe'floatList ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'floatList" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'floatList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'floatList")
                              +
                              +maybe'int64List ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'int64List" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'int64List
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'int64List")
                              +
                              +maybe'kind ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'kind" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'kind
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'kind")
                              +
                              +maybe'value ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'value" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'value")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.AllocationDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.AllocationDescription.html new file mode 100644 index 0000000..218b266 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.AllocationDescription.html @@ -0,0 +1,235 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/allocation_description.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.AllocationDescription where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data AllocationDescription = AllocationDescription{_AllocationDescription'requestedBytes
                              +                                                   :: !Data.Int.Int64,
                              +                                                   _AllocationDescription'allocatedBytes ::
                              +                                                   !Data.Int.Int64,
                              +                                                   _AllocationDescription'allocatorName ::
                              +                                                   !Data.Text.Text,
                              +                                                   _AllocationDescription'allocationId ::
                              +                                                   !Data.Int.Int64,
                              +                                                   _AllocationDescription'hasSingleReference ::
                              +                                                   !Prelude.Bool,
                              +                                                   _AllocationDescription'ptr :: !Data.Word.Word64}
                              +                           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "requestedBytes" f AllocationDescription
                              +           AllocationDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocationDescription'requestedBytes
                              +                 (\ x__ y__ -> x__{_AllocationDescription'requestedBytes = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocatedBytes" f AllocationDescription
                              +           AllocationDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocationDescription'allocatedBytes
                              +                 (\ x__ y__ -> x__{_AllocationDescription'allocatedBytes = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocatorName" f AllocationDescription
                              +           AllocationDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocationDescription'allocatorName
                              +                 (\ x__ y__ -> x__{_AllocationDescription'allocatorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocationId" f AllocationDescription
                              +           AllocationDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocationDescription'allocationId
                              +                 (\ x__ y__ -> x__{_AllocationDescription'allocationId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hasSingleReference" f AllocationDescription
                              +           AllocationDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _AllocationDescription'hasSingleReference
                              +                 (\ x__ y__ ->
                              +                    x__{_AllocationDescription'hasSingleReference = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Word.Word64, b ~ Data.Word.Word64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "ptr" f AllocationDescription
                              +           AllocationDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocationDescription'ptr
                              +                 (\ x__ y__ -> x__{_AllocationDescription'ptr = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default AllocationDescription where
                              +        def
                              +          = AllocationDescription{_AllocationDescription'requestedBytes =
                              +                                    Data.ProtoLens.fieldDefault,
                              +                                  _AllocationDescription'allocatedBytes =
                              +                                    Data.ProtoLens.fieldDefault,
                              +                                  _AllocationDescription'allocatorName =
                              +                                    Data.ProtoLens.fieldDefault,
                              +                                  _AllocationDescription'allocationId = Data.ProtoLens.fieldDefault,
                              +                                  _AllocationDescription'hasSingleReference =
                              +                                    Data.ProtoLens.fieldDefault,
                              +                                  _AllocationDescription'ptr = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message AllocationDescription where
                              +        descriptor
                              +          = let requestedBytes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "requested_bytes"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional requestedBytes)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocationDescription
                              +                allocatedBytes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocated_bytes"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatedBytes)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocationDescription
                              +                allocatorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocator_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocationDescription
                              +                allocationId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocation_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocationId)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocationDescription
                              +                hasSingleReference__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "has_single_reference"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         hasSingleReference)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocationDescription
                              +                ptr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "ptr"
                              +                      (Data.ProtoLens.UInt64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional ptr)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocationDescription
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.AllocationDescription")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, requestedBytes__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, allocatedBytes__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, allocatorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, allocationId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, hasSingleReference__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, ptr__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("requested_bytes", requestedBytes__field_descriptor),
                              +                    ("allocated_bytes", allocatedBytes__field_descriptor),
                              +                    ("allocator_name", allocatorName__field_descriptor),
                              +                    ("allocation_id", allocationId__field_descriptor),
                              +                    ("has_single_reference", hasSingleReference__field_descriptor),
                              +                    ("ptr", ptr__field_descriptor)])
                              +
                              +allocatedBytes ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "allocatedBytes" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +allocatedBytes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allocatedBytes")
                              +
                              +allocationId ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "allocationId" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +allocationId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allocationId")
                              +
                              +allocatorName ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "allocatorName" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +allocatorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allocatorName")
                              +
                              +hasSingleReference ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "hasSingleReference" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +hasSingleReference
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "hasSingleReference")
                              +
                              +ptr ::
                              +    forall f s t a b . (Lens.Labels.HasLens "ptr" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +ptr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "ptr")
                              +
                              +requestedBytes ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "requestedBytes" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +requestedBytes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "requestedBytes")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.AttrValue.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.AttrValue.html new file mode 100644 index 0000000..8825c6f --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.AttrValue.html @@ -0,0 +1,974 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/attr_value.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.AttrValue where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.Tensor
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +
                              +data AttrValue = AttrValue{_AttrValue'value ::
                              +                           !(Prelude.Maybe AttrValue'Value)}
                              +               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data AttrValue'Value = AttrValue'S !Data.ByteString.ByteString
                              +                     | AttrValue'I !Data.Int.Int64
                              +                     | AttrValue'F !Prelude.Float
                              +                     | AttrValue'B !Prelude.Bool
                              +                     | AttrValue'Type !Proto.Tensorflow.Core.Framework.Types.DataType
                              +                     | AttrValue'Shape !Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
                              +                     | AttrValue'Tensor !Proto.Tensorflow.Core.Framework.Tensor.TensorProto
                              +                     | AttrValue'List !AttrValue'ListValue
                              +                     | AttrValue'Func !NameAttrList
                              +                     | AttrValue'Placeholder !Data.Text.Text
                              +                     deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Maybe AttrValue'Value,
                              +          b ~ Prelude.Maybe AttrValue'Value, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Data.ByteString.ByteString,
                              +          b ~ Prelude.Maybe Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe's" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'S x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'S y__))
                              +
                              +instance (a ~ Data.ByteString.ByteString,
                              +          b ~ Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "s" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'S x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'S y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe Data.Int.Int64,
                              +          b ~ Prelude.Maybe Data.Int.Int64, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'i" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'I x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'I y__))
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "i" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'I x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'I y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe Prelude.Float,
                              +          b ~ Prelude.Maybe Prelude.Float, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'f" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'F x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'F y__))
                              +
                              +instance (a ~ Prelude.Float, b ~ Prelude.Float,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "f" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'F x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'F y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe Prelude.Bool,
                              +          b ~ Prelude.Maybe Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'b" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'B x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'B y__))
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "b" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'B x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'B y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Prelude.Maybe Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'type'" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'Type x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'Type y__))
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "type'" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'Type x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'Type y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'shape" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'Shape x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'Shape y__))
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shape" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'Shape x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'Shape y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensor" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'Tensor x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'Tensor y__))
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensor" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'Tensor x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'Tensor y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe AttrValue'ListValue,
                              +          b ~ Prelude.Maybe AttrValue'ListValue, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'list" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'List x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'List y__))
                              +
                              +instance (a ~ AttrValue'ListValue, b ~ AttrValue'ListValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "list" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'List x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'List y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe NameAttrList,
                              +          b ~ Prelude.Maybe NameAttrList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'func" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'Func x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'Func y__))
                              +
                              +instance (a ~ NameAttrList, b ~ NameAttrList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "func" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'Func x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'Func y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe Data.Text.Text,
                              +          b ~ Prelude.Maybe Data.Text.Text, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'placeholder" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (AttrValue'Placeholder x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap AttrValue'Placeholder y__))
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "placeholder" f AttrValue AttrValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'value
                              +                 (\ x__ y__ -> x__{_AttrValue'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (AttrValue'Placeholder x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap AttrValue'Placeholder y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance Data.Default.Class.Default AttrValue where
                              +        def = AttrValue{_AttrValue'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message AttrValue where
                              +        descriptor
                              +          = let s__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "s"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.OptionalField maybe's)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                i__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "i"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.OptionalField maybe'i)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                f__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "f"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.OptionalField maybe'f)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                b__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "b"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.OptionalField maybe'b)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                type'__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.OptionalField maybe'type')
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                shape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.OptionalField maybe'shape)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                tensor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
                              +                      (Data.ProtoLens.OptionalField maybe'tensor)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                list__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor AttrValue'ListValue)
                              +                      (Data.ProtoLens.OptionalField maybe'list)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                func__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "func"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor NameAttrList)
                              +                      (Data.ProtoLens.OptionalField maybe'func)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +                placeholder__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "placeholder"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.OptionalField maybe'placeholder)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.AttrValue")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 2, s__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, i__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, f__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, b__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, type'__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, shape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, tensor__field_descriptor),
                              +                    (Data.ProtoLens.Tag 1, list__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, func__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, placeholder__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("s", s__field_descriptor), ("i", i__field_descriptor),
                              +                    ("f", f__field_descriptor), ("b", b__field_descriptor),
                              +                    ("type", type'__field_descriptor),
                              +                    ("shape", shape__field_descriptor),
                              +                    ("tensor", tensor__field_descriptor),
                              +                    ("list", list__field_descriptor), ("func", func__field_descriptor),
                              +                    ("placeholder", placeholder__field_descriptor)])
                              +
                              +data AttrValue'ListValue = AttrValue'ListValue{_AttrValue'ListValue's
                              +                                               :: ![Data.ByteString.ByteString],
                              +                                               _AttrValue'ListValue'i :: ![Data.Int.Int64],
                              +                                               _AttrValue'ListValue'f :: ![Prelude.Float],
                              +                                               _AttrValue'ListValue'b :: ![Prelude.Bool],
                              +                                               _AttrValue'ListValue'type' ::
                              +                                               ![Proto.Tensorflow.Core.Framework.Types.DataType],
                              +                                               _AttrValue'ListValue'shape ::
                              +                                               ![Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto],
                              +                                               _AttrValue'ListValue'tensor ::
                              +                                               ![Proto.Tensorflow.Core.Framework.Tensor.TensorProto],
                              +                                               _AttrValue'ListValue'func :: ![NameAttrList]}
                              +                         deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Data.ByteString.ByteString],
                              +          b ~ [Data.ByteString.ByteString], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "s" f AttrValue'ListValue AttrValue'ListValue a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'ListValue's
                              +                 (\ x__ y__ -> x__{_AttrValue'ListValue's = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "i" f AttrValue'ListValue AttrValue'ListValue a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'ListValue'i
                              +                 (\ x__ y__ -> x__{_AttrValue'ListValue'i = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Float], b ~ [Prelude.Float],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "f" f AttrValue'ListValue AttrValue'ListValue a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'ListValue'f
                              +                 (\ x__ y__ -> x__{_AttrValue'ListValue'f = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Bool], b ~ [Prelude.Bool],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "b" f AttrValue'ListValue AttrValue'ListValue a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'ListValue'b
                              +                 (\ x__ y__ -> x__{_AttrValue'ListValue'b = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Proto.Tensorflow.Core.Framework.Types.DataType],
                              +          b ~ [Proto.Tensorflow.Core.Framework.Types.DataType],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "type'" f AttrValue'ListValue
                              +           AttrValue'ListValue
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'ListValue'type'
                              +                 (\ x__ y__ -> x__{_AttrValue'ListValue'type' = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            [Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto],
                              +          b ~ [Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shape" f AttrValue'ListValue
                              +           AttrValue'ListValue
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'ListValue'shape
                              +                 (\ x__ y__ -> x__{_AttrValue'ListValue'shape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Proto.Tensorflow.Core.Framework.Tensor.TensorProto],
                              +          b ~ [Proto.Tensorflow.Core.Framework.Tensor.TensorProto],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensor" f AttrValue'ListValue
                              +           AttrValue'ListValue
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'ListValue'tensor
                              +                 (\ x__ y__ -> x__{_AttrValue'ListValue'tensor = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [NameAttrList], b ~ [NameAttrList],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "func" f AttrValue'ListValue
                              +           AttrValue'ListValue
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AttrValue'ListValue'func
                              +                 (\ x__ y__ -> x__{_AttrValue'ListValue'func = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default AttrValue'ListValue where
                              +        def
                              +          = AttrValue'ListValue{_AttrValue'ListValue's = [],
                              +                                _AttrValue'ListValue'i = [], _AttrValue'ListValue'f = [],
                              +                                _AttrValue'ListValue'b = [], _AttrValue'ListValue'type' = [],
                              +                                _AttrValue'ListValue'shape = [], _AttrValue'ListValue'tensor = [],
                              +                                _AttrValue'ListValue'func = []}
                              +
                              +instance Data.ProtoLens.Message AttrValue'ListValue where
                              +        descriptor
                              +          = let s__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "s"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked s)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue'ListValue
                              +                i__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "i"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed i)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue'ListValue
                              +                f__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "f"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed f)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue'ListValue
                              +                b__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "b"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed b)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue'ListValue
                              +                type'__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed type')
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue'ListValue
                              +                shape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked shape)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue'ListValue
                              +                tensor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked tensor)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue'ListValue
                              +                func__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "func"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor NameAttrList)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked func)
                              +                      :: Data.ProtoLens.FieldDescriptor AttrValue'ListValue
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.AttrValue.ListValue")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 2, s__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, i__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, f__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, b__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, type'__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, shape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, tensor__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, func__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("s", s__field_descriptor), ("i", i__field_descriptor),
                              +                    ("f", f__field_descriptor), ("b", b__field_descriptor),
                              +                    ("type", type'__field_descriptor),
                              +                    ("shape", shape__field_descriptor),
                              +                    ("tensor", tensor__field_descriptor),
                              +                    ("func", func__field_descriptor)])
                              +
                              +data NameAttrList = NameAttrList{_NameAttrList'name ::
                              +                                 !Data.Text.Text,
                              +                                 _NameAttrList'attr :: !(Data.Map.Map Data.Text.Text AttrValue)}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f NameAttrList NameAttrList a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NameAttrList'name
                              +                 (\ x__ y__ -> x__{_NameAttrList'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text AttrValue,
                              +          b ~ Data.Map.Map Data.Text.Text AttrValue, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "attr" f NameAttrList NameAttrList a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NameAttrList'attr
                              +                 (\ x__ y__ -> x__{_NameAttrList'attr = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default NameAttrList where
                              +        def
                              +          = NameAttrList{_NameAttrList'name = Data.ProtoLens.fieldDefault,
                              +                         _NameAttrList'attr = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message NameAttrList where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor NameAttrList
                              +                attr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "attr"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor NameAttrList'AttrEntry)
                              +                      (Data.ProtoLens.MapField key value attr)
                              +                      :: Data.ProtoLens.FieldDescriptor NameAttrList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.NameAttrList")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, attr__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("attr", attr__field_descriptor)])
                              +
                              +data NameAttrList'AttrEntry = NameAttrList'AttrEntry{_NameAttrList'AttrEntry'key
                              +                                                     :: !Data.Text.Text,
                              +                                                     _NameAttrList'AttrEntry'value ::
                              +                                                     !(Prelude.Maybe AttrValue)}
                              +                            deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f NameAttrList'AttrEntry
                              +           NameAttrList'AttrEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NameAttrList'AttrEntry'key
                              +                 (\ x__ y__ -> x__{_NameAttrList'AttrEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ AttrValue, b ~ AttrValue, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f NameAttrList'AttrEntry
                              +           NameAttrList'AttrEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NameAttrList'AttrEntry'value
                              +                 (\ x__ y__ -> x__{_NameAttrList'AttrEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe AttrValue, b ~ Prelude.Maybe AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f NameAttrList'AttrEntry
                              +           NameAttrList'AttrEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NameAttrList'AttrEntry'value
                              +                 (\ x__ y__ -> x__{_NameAttrList'AttrEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default NameAttrList'AttrEntry where
                              +        def
                              +          = NameAttrList'AttrEntry{_NameAttrList'AttrEntry'key =
                              +                                     Data.ProtoLens.fieldDefault,
                              +                                   _NameAttrList'AttrEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message NameAttrList'AttrEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor NameAttrList'AttrEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor AttrValue)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor NameAttrList'AttrEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.NameAttrList.AttrEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +attr ::
                              +     forall f s t a b . (Lens.Labels.HasLens "attr" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +attr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "attr")
                              +
                              +b ::
                              +  forall f s t a b . (Lens.Labels.HasLens "b" f s t a b) =>
                              +    Lens.Family2.LensLike f s t a b
                              +b = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "b")
                              +
                              +f ::
                              +  forall f s t a b . (Lens.Labels.HasLens "f" f s t a b) =>
                              +    Lens.Family2.LensLike f s t a b
                              +f = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "f")
                              +
                              +func ::
                              +     forall f s t a b . (Lens.Labels.HasLens "func" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +func
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "func")
                              +
                              +i ::
                              +  forall f s t a b . (Lens.Labels.HasLens "i" f s t a b) =>
                              +    Lens.Family2.LensLike f s t a b
                              +i = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "i")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +list ::
                              +     forall f s t a b . (Lens.Labels.HasLens "list" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +list
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "list")
                              +
                              +maybe'b ::
                              +        forall f s t a b . (Lens.Labels.HasLens "maybe'b" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +maybe'b
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'b")
                              +
                              +maybe'f ::
                              +        forall f s t a b . (Lens.Labels.HasLens "maybe'f" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +maybe'f
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'f")
                              +
                              +maybe'func ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'func" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'func
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'func")
                              +
                              +maybe'i ::
                              +        forall f s t a b . (Lens.Labels.HasLens "maybe'i" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +maybe'i
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'i")
                              +
                              +maybe'list ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'list" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'list
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'list")
                              +
                              +maybe'placeholder ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'placeholder" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'placeholder
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'placeholder")
                              +
                              +maybe's ::
                              +        forall f s t a b . (Lens.Labels.HasLens "maybe's" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +maybe's
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe's")
                              +
                              +maybe'shape ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'shape" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'shape")
                              +
                              +maybe'tensor ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "maybe'tensor" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +maybe'tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'tensor")
                              +
                              +maybe'type' ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'type'" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'type'
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'type'")
                              +
                              +maybe'value ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'value" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'value")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +placeholder ::
                              +            forall f s t a b . (Lens.Labels.HasLens "placeholder" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +placeholder
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "placeholder")
                              +
                              +s ::
                              +  forall f s t a b . (Lens.Labels.HasLens "s" f s t a b) =>
                              +    Lens.Family2.LensLike f s t a b
                              +s = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "s")
                              +
                              +shape ::
                              +      forall f s t a b . (Lens.Labels.HasLens "shape" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "shape")
                              +
                              +tensor ::
                              +       forall f s t a b . (Lens.Labels.HasLens "tensor" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensor")
                              +
                              +type' ::
                              +      forall f s t a b . (Lens.Labels.HasLens "type'" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +type'
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "type'")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.CostGraph.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.CostGraph.html new file mode 100644 index 0000000..d15ae52 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.CostGraph.html @@ -0,0 +1,821 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/cost_graph.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.CostGraph where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +
                              +data CostGraphDef = CostGraphDef{_CostGraphDef'node ::
                              +                                 ![CostGraphDef'Node]}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [CostGraphDef'Node], b ~ [CostGraphDef'Node],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "node" f CostGraphDef CostGraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'node
                              +                 (\ x__ y__ -> x__{_CostGraphDef'node = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CostGraphDef where
                              +        def = CostGraphDef{_CostGraphDef'node = []}
                              +
                              +instance Data.ProtoLens.Message CostGraphDef where
                              +        descriptor
                              +          = let node__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "node"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked node)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CostGraphDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, node__field_descriptor)])
                              +                (Data.Map.fromList [("node", node__field_descriptor)])
                              +
                              +data CostGraphDef'Node = CostGraphDef'Node{_CostGraphDef'Node'name
                              +                                           :: !Data.Text.Text,
                              +                                           _CostGraphDef'Node'device :: !Data.Text.Text,
                              +                                           _CostGraphDef'Node'id :: !Data.Int.Int32,
                              +                                           _CostGraphDef'Node'inputInfo ::
                              +                                           ![CostGraphDef'Node'InputInfo],
                              +                                           _CostGraphDef'Node'outputInfo ::
                              +                                           ![CostGraphDef'Node'OutputInfo],
                              +                                           _CostGraphDef'Node'temporaryMemorySize ::
                              +                                           !Data.Int.Int64,
                              +                                           _CostGraphDef'Node'hostTempMemorySize :: !Data.Int.Int64,
                              +                                           _CostGraphDef'Node'deviceTempMemorySize ::
                              +                                           !Data.Int.Int64,
                              +                                           _CostGraphDef'Node'hostPersistentMemorySize ::
                              +                                           !Data.Int.Int64,
                              +                                           _CostGraphDef'Node'devicePersistentMemorySize ::
                              +                                           !Data.Int.Int64,
                              +                                           _CostGraphDef'Node'computeCost :: !Data.Int.Int64,
                              +                                           _CostGraphDef'Node'computeTime :: !Data.Int.Int64,
                              +                                           _CostGraphDef'Node'memoryTime :: !Data.Int.Int64,
                              +                                           _CostGraphDef'Node'isFinal :: !Prelude.Bool,
                              +                                           _CostGraphDef'Node'controlInput :: ![Data.Int.Int32]}
                              +                       deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f CostGraphDef'Node CostGraphDef'Node a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'name
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "device" f CostGraphDef'Node CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'device
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'device = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "id" f CostGraphDef'Node CostGraphDef'Node a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'id
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'id = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [CostGraphDef'Node'InputInfo],
                              +          b ~ [CostGraphDef'Node'InputInfo], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "inputInfo" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'inputInfo
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'inputInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [CostGraphDef'Node'OutputInfo],
                              +          b ~ [CostGraphDef'Node'OutputInfo], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "outputInfo" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'outputInfo
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'outputInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "temporaryMemorySize" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'temporaryMemorySize
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'temporaryMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hostTempMemorySize" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'hostTempMemorySize
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'hostTempMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deviceTempMemorySize" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _CostGraphDef'Node'deviceTempMemorySize
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'deviceTempMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hostPersistentMemorySize" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _CostGraphDef'Node'hostPersistentMemorySize
                              +                 (\ x__ y__ ->
                              +                    x__{_CostGraphDef'Node'hostPersistentMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "devicePersistentMemorySize" f
                              +           CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _CostGraphDef'Node'devicePersistentMemorySize
                              +                 (\ x__ y__ ->
                              +                    x__{_CostGraphDef'Node'devicePersistentMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "computeCost" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'computeCost
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'computeCost = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "computeTime" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'computeTime
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'computeTime = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "memoryTime" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'memoryTime
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'memoryTime = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "isFinal" f CostGraphDef'Node CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'isFinal
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'isFinal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int32], b ~ [Data.Int.Int32],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "controlInput" f CostGraphDef'Node
                              +           CostGraphDef'Node
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'controlInput
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'controlInput = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CostGraphDef'Node where
                              +        def
                              +          = CostGraphDef'Node{_CostGraphDef'Node'name =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'device = Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'id = Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'inputInfo = [],
                              +                              _CostGraphDef'Node'outputInfo = [],
                              +                              _CostGraphDef'Node'temporaryMemorySize =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'hostTempMemorySize =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'deviceTempMemorySize =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'hostPersistentMemorySize =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'devicePersistentMemorySize =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'computeCost = Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'computeTime = Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'memoryTime = Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'isFinal = Data.ProtoLens.fieldDefault,
                              +                              _CostGraphDef'Node'controlInput = []}
                              +
                              +instance Data.ProtoLens.Message CostGraphDef'Node where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                device__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                id__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "id"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional id)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                inputInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "input_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node'InputInfo)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked inputInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                outputInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "output_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node'OutputInfo)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked outputInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                temporaryMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "temporary_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         temporaryMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                hostTempMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "host_temp_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         hostTempMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                deviceTempMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_temp_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         deviceTempMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                hostPersistentMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "host_persistent_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         hostPersistentMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                devicePersistentMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_persistent_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         devicePersistentMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                computeCost__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "compute_cost"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional computeCost)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                computeTime__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "compute_time"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional computeTime)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                memoryTime__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "memory_time"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional memoryTime)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                isFinal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "is_final"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isFinal)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +                controlInput__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "control_input"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed controlInput)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CostGraphDef.Node")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, device__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, id__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, inputInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, outputInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, temporaryMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, hostTempMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 11, deviceTempMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 12,
                              +                     hostPersistentMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 16,
                              +                     devicePersistentMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, computeCost__field_descriptor),
                              +                    (Data.ProtoLens.Tag 14, computeTime__field_descriptor),
                              +                    (Data.ProtoLens.Tag 15, memoryTime__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, isFinal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, controlInput__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("device", device__field_descriptor), ("id", id__field_descriptor),
                              +                    ("input_info", inputInfo__field_descriptor),
                              +                    ("output_info", outputInfo__field_descriptor),
                              +                    ("temporary_memory_size", temporaryMemorySize__field_descriptor),
                              +                    ("host_temp_memory_size", hostTempMemorySize__field_descriptor),
                              +                    ("device_temp_memory_size",
                              +                     deviceTempMemorySize__field_descriptor),
                              +                    ("host_persistent_memory_size",
                              +                     hostPersistentMemorySize__field_descriptor),
                              +                    ("device_persistent_memory_size",
                              +                     devicePersistentMemorySize__field_descriptor),
                              +                    ("compute_cost", computeCost__field_descriptor),
                              +                    ("compute_time", computeTime__field_descriptor),
                              +                    ("memory_time", memoryTime__field_descriptor),
                              +                    ("is_final", isFinal__field_descriptor),
                              +                    ("control_input", controlInput__field_descriptor)])
                              +
                              +data CostGraphDef'Node'InputInfo = CostGraphDef'Node'InputInfo{_CostGraphDef'Node'InputInfo'precedingNode
                              +                                                               :: !Data.Int.Int32,
                              +                                                               _CostGraphDef'Node'InputInfo'precedingPort
                              +                                                               :: !Data.Int.Int32}
                              +                                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "precedingNode" f CostGraphDef'Node'InputInfo
                              +           CostGraphDef'Node'InputInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _CostGraphDef'Node'InputInfo'precedingNode
                              +                 (\ x__ y__ ->
                              +                    x__{_CostGraphDef'Node'InputInfo'precedingNode = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "precedingPort" f CostGraphDef'Node'InputInfo
                              +           CostGraphDef'Node'InputInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _CostGraphDef'Node'InputInfo'precedingPort
                              +                 (\ x__ y__ ->
                              +                    x__{_CostGraphDef'Node'InputInfo'precedingPort = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CostGraphDef'Node'InputInfo
                              +         where
                              +        def
                              +          = CostGraphDef'Node'InputInfo{_CostGraphDef'Node'InputInfo'precedingNode
                              +                                          = Data.ProtoLens.fieldDefault,
                              +                                        _CostGraphDef'Node'InputInfo'precedingPort =
                              +                                          Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message CostGraphDef'Node'InputInfo where
                              +        descriptor
                              +          = let precedingNode__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "preceding_node"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional precedingNode)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node'InputInfo
                              +                precedingPort__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "preceding_port"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional precedingPort)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node'InputInfo
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CostGraphDef.Node.InputInfo")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, precedingNode__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, precedingPort__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("preceding_node", precedingNode__field_descriptor),
                              +                    ("preceding_port", precedingPort__field_descriptor)])
                              +
                              +data CostGraphDef'Node'OutputInfo = CostGraphDef'Node'OutputInfo{_CostGraphDef'Node'OutputInfo'size
                              +                                                                 :: !Data.Int.Int64,
                              +                                                                 _CostGraphDef'Node'OutputInfo'aliasInputPort
                              +                                                                 :: !Data.Int.Int64,
                              +                                                                 _CostGraphDef'Node'OutputInfo'shape
                              +                                                                 ::
                              +                                                                 !(Prelude.Maybe
                              +                                                                     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto),
                              +                                                                 _CostGraphDef'Node'OutputInfo'dtype
                              +                                                                 ::
                              +                                                                 !Proto.Tensorflow.Core.Framework.Types.DataType}
                              +                                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "size" f CostGraphDef'Node'OutputInfo
                              +           CostGraphDef'Node'OutputInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'size
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'size = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "aliasInputPort" f CostGraphDef'Node'OutputInfo
                              +           CostGraphDef'Node'OutputInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _CostGraphDef'Node'OutputInfo'aliasInputPort
                              +                 (\ x__ y__ ->
                              +                    x__{_CostGraphDef'Node'OutputInfo'aliasInputPort = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shape" f CostGraphDef'Node'OutputInfo
                              +           CostGraphDef'Node'OutputInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'shape
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'shape = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'shape" f CostGraphDef'Node'OutputInfo
                              +           CostGraphDef'Node'OutputInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'shape
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'shape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dtype" f CostGraphDef'Node'OutputInfo
                              +           CostGraphDef'Node'OutputInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'dtype
                              +                 (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'dtype = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CostGraphDef'Node'OutputInfo
                              +         where
                              +        def
                              +          = CostGraphDef'Node'OutputInfo{_CostGraphDef'Node'OutputInfo'size =
                              +                                           Data.ProtoLens.fieldDefault,
                              +                                         _CostGraphDef'Node'OutputInfo'aliasInputPort =
                              +                                           Data.ProtoLens.fieldDefault,
                              +                                         _CostGraphDef'Node'OutputInfo'shape = Prelude.Nothing,
                              +                                         _CostGraphDef'Node'OutputInfo'dtype =
                              +                                           Data.Default.Class.def}
                              +
                              +instance Data.ProtoLens.Message CostGraphDef'Node'OutputInfo where
                              +        descriptor
                              +          = let size__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional size)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node'OutputInfo
                              +                aliasInputPort__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "alias_input_port"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional aliasInputPort)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node'OutputInfo
                              +                shape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.OptionalField maybe'shape)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node'OutputInfo
                              +                dtype__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dtype"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
                              +                      :: Data.ProtoLens.FieldDescriptor CostGraphDef'Node'OutputInfo
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CostGraphDef.Node.OutputInfo")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, size__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, aliasInputPort__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, shape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, dtype__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("size", size__field_descriptor),
                              +                    ("alias_input_port", aliasInputPort__field_descriptor),
                              +                    ("shape", shape__field_descriptor),
                              +                    ("dtype", dtype__field_descriptor)])
                              +
                              +aliasInputPort ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "aliasInputPort" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +aliasInputPort
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "aliasInputPort")
                              +
                              +computeCost ::
                              +            forall f s t a b . (Lens.Labels.HasLens "computeCost" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +computeCost
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "computeCost")
                              +
                              +computeTime ::
                              +            forall f s t a b . (Lens.Labels.HasLens "computeTime" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +computeTime
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "computeTime")
                              +
                              +controlInput ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "controlInput" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +controlInput
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "controlInput")
                              +
                              +device ::
                              +       forall f s t a b . (Lens.Labels.HasLens "device" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +device
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "device")
                              +
                              +devicePersistentMemorySize ::
                              +                           forall f s t a b .
                              +                             (Lens.Labels.HasLens "devicePersistentMemorySize" f s t a b) =>
                              +                             Lens.Family2.LensLike f s t a b
                              +devicePersistentMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "devicePersistentMemorySize")
                              +
                              +deviceTempMemorySize ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "deviceTempMemorySize" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +deviceTempMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "deviceTempMemorySize")
                              +
                              +dtype ::
                              +      forall f s t a b . (Lens.Labels.HasLens "dtype" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +dtype
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "dtype")
                              +
                              +hostPersistentMemorySize ::
                              +                         forall f s t a b .
                              +                           (Lens.Labels.HasLens "hostPersistentMemorySize" f s t a b) =>
                              +                           Lens.Family2.LensLike f s t a b
                              +hostPersistentMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "hostPersistentMemorySize")
                              +
                              +hostTempMemorySize ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "hostTempMemorySize" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +hostTempMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "hostTempMemorySize")
                              +
                              +id ::
                              +   forall f s t a b . (Lens.Labels.HasLens "id" f s t a b) =>
                              +     Lens.Family2.LensLike f s t a b
                              +id
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "id")
                              +
                              +inputInfo ::
                              +          forall f s t a b . (Lens.Labels.HasLens "inputInfo" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +inputInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "inputInfo")
                              +
                              +isFinal ::
                              +        forall f s t a b . (Lens.Labels.HasLens "isFinal" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +isFinal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "isFinal")
                              +
                              +maybe'shape ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'shape" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'shape")
                              +
                              +memoryTime ::
                              +           forall f s t a b . (Lens.Labels.HasLens "memoryTime" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +memoryTime
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "memoryTime")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +node ::
                              +     forall f s t a b . (Lens.Labels.HasLens "node" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +node
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "node")
                              +
                              +outputInfo ::
                              +           forall f s t a b . (Lens.Labels.HasLens "outputInfo" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +outputInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "outputInfo")
                              +
                              +precedingNode ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "precedingNode" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +precedingNode
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "precedingNode")
                              +
                              +precedingPort ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "precedingPort" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +precedingPort
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "precedingPort")
                              +
                              +shape ::
                              +      forall f s t a b . (Lens.Labels.HasLens "shape" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "shape")
                              +
                              +size ::
                              +     forall f s t a b . (Lens.Labels.HasLens "size" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +size
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "size")
                              +
                              +temporaryMemorySize ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "temporaryMemorySize" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +temporaryMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "temporaryMemorySize")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.DeviceAttributes.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.DeviceAttributes.html new file mode 100644 index 0000000..6b3079d --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.DeviceAttributes.html @@ -0,0 +1,282 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/device_attributes.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.DeviceAttributes where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data DeviceAttributes = DeviceAttributes{_DeviceAttributes'name ::
                              +                                         !Data.Text.Text,
                              +                                         _DeviceAttributes'deviceType :: !Data.Text.Text,
                              +                                         _DeviceAttributes'memoryLimit :: !Data.Int.Int64,
                              +                                         _DeviceAttributes'locality ::
                              +                                         !(Prelude.Maybe DeviceLocality),
                              +                                         _DeviceAttributes'incarnation :: !Data.Word.Word64,
                              +                                         _DeviceAttributes'physicalDeviceDesc :: !Data.Text.Text}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f DeviceAttributes DeviceAttributes a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceAttributes'name
                              +                 (\ x__ y__ -> x__{_DeviceAttributes'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deviceType" f DeviceAttributes
                              +           DeviceAttributes
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceAttributes'deviceType
                              +                 (\ x__ y__ -> x__{_DeviceAttributes'deviceType = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "memoryLimit" f DeviceAttributes
                              +           DeviceAttributes
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceAttributes'memoryLimit
                              +                 (\ x__ y__ -> x__{_DeviceAttributes'memoryLimit = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ DeviceLocality, b ~ DeviceLocality,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "locality" f DeviceAttributes DeviceAttributes
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceAttributes'locality
                              +                 (\ x__ y__ -> x__{_DeviceAttributes'locality = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe DeviceLocality,
                              +          b ~ Prelude.Maybe DeviceLocality, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'locality" f DeviceAttributes
                              +           DeviceAttributes
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceAttributes'locality
                              +                 (\ x__ y__ -> x__{_DeviceAttributes'locality = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Word.Word64, b ~ Data.Word.Word64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "incarnation" f DeviceAttributes
                              +           DeviceAttributes
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceAttributes'incarnation
                              +                 (\ x__ y__ -> x__{_DeviceAttributes'incarnation = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "physicalDeviceDesc" f DeviceAttributes
                              +           DeviceAttributes
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceAttributes'physicalDeviceDesc
                              +                 (\ x__ y__ -> x__{_DeviceAttributes'physicalDeviceDesc = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default DeviceAttributes where
                              +        def
                              +          = DeviceAttributes{_DeviceAttributes'name =
                              +                               Data.ProtoLens.fieldDefault,
                              +                             _DeviceAttributes'deviceType = Data.ProtoLens.fieldDefault,
                              +                             _DeviceAttributes'memoryLimit = Data.ProtoLens.fieldDefault,
                              +                             _DeviceAttributes'locality = Prelude.Nothing,
                              +                             _DeviceAttributes'incarnation = Data.ProtoLens.fieldDefault,
                              +                             _DeviceAttributes'physicalDeviceDesc = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message DeviceAttributes where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceAttributes
                              +                deviceType__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_type"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional deviceType)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceAttributes
                              +                memoryLimit__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "memory_limit"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional memoryLimit)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceAttributes
                              +                locality__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "locality"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor DeviceLocality)
                              +                      (Data.ProtoLens.OptionalField maybe'locality)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceAttributes
                              +                incarnation__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "incarnation"
                              +                      (Data.ProtoLens.Fixed64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional incarnation)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceAttributes
                              +                physicalDeviceDesc__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "physical_device_desc"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         physicalDeviceDesc)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceAttributes
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.DeviceAttributes")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, deviceType__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, memoryLimit__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, locality__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, incarnation__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, physicalDeviceDesc__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("device_type", deviceType__field_descriptor),
                              +                    ("memory_limit", memoryLimit__field_descriptor),
                              +                    ("locality", locality__field_descriptor),
                              +                    ("incarnation", incarnation__field_descriptor),
                              +                    ("physical_device_desc", physicalDeviceDesc__field_descriptor)])
                              +
                              +data DeviceLocality = DeviceLocality{_DeviceLocality'busId ::
                              +                                     !Data.Int.Int32}
                              +                    deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "busId" f DeviceLocality DeviceLocality a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceLocality'busId
                              +                 (\ x__ y__ -> x__{_DeviceLocality'busId = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default DeviceLocality where
                              +        def
                              +          = DeviceLocality{_DeviceLocality'busId =
                              +                             Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message DeviceLocality where
                              +        descriptor
                              +          = let busId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bus_id"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional busId)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceLocality
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.DeviceLocality")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, busId__field_descriptor)])
                              +                (Data.Map.fromList [("bus_id", busId__field_descriptor)])
                              +
                              +busId ::
                              +      forall f s t a b . (Lens.Labels.HasLens "busId" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +busId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "busId")
                              +
                              +deviceType ::
                              +           forall f s t a b . (Lens.Labels.HasLens "deviceType" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +deviceType
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "deviceType")
                              +
                              +incarnation ::
                              +            forall f s t a b . (Lens.Labels.HasLens "incarnation" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +incarnation
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "incarnation")
                              +
                              +locality ::
                              +         forall f s t a b . (Lens.Labels.HasLens "locality" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +locality
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "locality")
                              +
                              +maybe'locality ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'locality" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'locality
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'locality")
                              +
                              +memoryLimit ::
                              +            forall f s t a b . (Lens.Labels.HasLens "memoryLimit" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +memoryLimit
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "memoryLimit")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +physicalDeviceDesc ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "physicalDeviceDesc" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +physicalDeviceDesc
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "physicalDeviceDesc")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Function.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Function.html new file mode 100644 index 0000000..e52f49e --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Function.html @@ -0,0 +1,491 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/function.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.Function where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.AttrValue
                              +import qualified Proto.Tensorflow.Core.Framework.NodeDef
                              +import qualified Proto.Tensorflow.Core.Framework.OpDef
                              +
                              +data FunctionDef = FunctionDef{_FunctionDef'signature ::
                              +                               !(Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpDef),
                              +                               _FunctionDef'attr ::
                              +                               !(Data.Map.Map Data.Text.Text
                              +                                   Proto.Tensorflow.Core.Framework.AttrValue.AttrValue),
                              +                               _FunctionDef'nodeDef ::
                              +                               ![Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
                              +                               _FunctionDef'ret :: !(Data.Map.Map Data.Text.Text Data.Text.Text)}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.OpDef.OpDef,
                              +          b ~ Proto.Tensorflow.Core.Framework.OpDef.OpDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "signature" f FunctionDef FunctionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'signature
                              +                 (\ x__ y__ -> x__{_FunctionDef'signature = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpDef,
                              +          b ~ Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'signature" f FunctionDef FunctionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'signature
                              +                 (\ x__ y__ -> x__{_FunctionDef'signature = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Data.Map.Map Data.Text.Text
                              +              Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~
                              +            Data.Map.Map Data.Text.Text
                              +              Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "attr" f FunctionDef FunctionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'attr
                              +                 (\ x__ y__ -> x__{_FunctionDef'attr = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
                              +          b ~ [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "nodeDef" f FunctionDef FunctionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'nodeDef
                              +                 (\ x__ y__ -> x__{_FunctionDef'nodeDef = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text Data.Text.Text,
                              +          b ~ Data.Map.Map Data.Text.Text Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "ret" f FunctionDef FunctionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'ret
                              +                 (\ x__ y__ -> x__{_FunctionDef'ret = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FunctionDef where
                              +        def
                              +          = FunctionDef{_FunctionDef'signature = Prelude.Nothing,
                              +                        _FunctionDef'attr = Data.Map.empty, _FunctionDef'nodeDef = [],
                              +                        _FunctionDef'ret = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message FunctionDef where
                              +        descriptor
                              +          = let signature__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "signature"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.OpDef.OpDef)
                              +                      (Data.ProtoLens.OptionalField maybe'signature)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDef
                              +                attr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "attr"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'AttrEntry)
                              +                      (Data.ProtoLens.MapField key value attr)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDef
                              +                nodeDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "node_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.NodeDef.NodeDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked nodeDef)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDef
                              +                ret__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "ret"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'RetEntry)
                              +                      (Data.ProtoLens.MapField key value ret)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FunctionDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, signature__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, attr__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, nodeDef__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, ret__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("signature", signature__field_descriptor),
                              +                    ("attr", attr__field_descriptor),
                              +                    ("node_def", nodeDef__field_descriptor),
                              +                    ("ret", ret__field_descriptor)])
                              +
                              +data FunctionDef'AttrEntry = FunctionDef'AttrEntry{_FunctionDef'AttrEntry'key
                              +                                                   :: !Data.Text.Text,
                              +                                                   _FunctionDef'AttrEntry'value ::
                              +                                                   !(Prelude.Maybe
                              +                                                       Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)}
                              +                           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f FunctionDef'AttrEntry
                              +           FunctionDef'AttrEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'AttrEntry'key
                              +                 (\ x__ y__ -> x__{_FunctionDef'AttrEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f FunctionDef'AttrEntry
                              +           FunctionDef'AttrEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'AttrEntry'value
                              +                 (\ x__ y__ -> x__{_FunctionDef'AttrEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f FunctionDef'AttrEntry
                              +           FunctionDef'AttrEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'AttrEntry'value
                              +                 (\ x__ y__ -> x__{_FunctionDef'AttrEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FunctionDef'AttrEntry where
                              +        def
                              +          = FunctionDef'AttrEntry{_FunctionDef'AttrEntry'key =
                              +                                    Data.ProtoLens.fieldDefault,
                              +                                  _FunctionDef'AttrEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message FunctionDef'AttrEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDef'AttrEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDef'AttrEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FunctionDef.AttrEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data FunctionDef'RetEntry = FunctionDef'RetEntry{_FunctionDef'RetEntry'key
                              +                                                 :: !Data.Text.Text,
                              +                                                 _FunctionDef'RetEntry'value :: !Data.Text.Text}
                              +                          deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f FunctionDef'RetEntry
                              +           FunctionDef'RetEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'RetEntry'key
                              +                 (\ x__ y__ -> x__{_FunctionDef'RetEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f FunctionDef'RetEntry
                              +           FunctionDef'RetEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDef'RetEntry'value
                              +                 (\ x__ y__ -> x__{_FunctionDef'RetEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FunctionDef'RetEntry where
                              +        def
                              +          = FunctionDef'RetEntry{_FunctionDef'RetEntry'key =
                              +                                   Data.ProtoLens.fieldDefault,
                              +                                 _FunctionDef'RetEntry'value = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message FunctionDef'RetEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDef'RetEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDef'RetEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FunctionDef.RetEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data FunctionDefLibrary = FunctionDefLibrary{_FunctionDefLibrary'function
                              +                                             :: ![FunctionDef],
                              +                                             _FunctionDefLibrary'gradient :: ![GradientDef]}
                              +                        deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [FunctionDef], b ~ [FunctionDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "function" f FunctionDefLibrary
                              +           FunctionDefLibrary
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDefLibrary'function
                              +                 (\ x__ y__ -> x__{_FunctionDefLibrary'function = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [GradientDef], b ~ [GradientDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "gradient" f FunctionDefLibrary
                              +           FunctionDefLibrary
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _FunctionDefLibrary'gradient
                              +                 (\ x__ y__ -> x__{_FunctionDefLibrary'gradient = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default FunctionDefLibrary where
                              +        def
                              +          = FunctionDefLibrary{_FunctionDefLibrary'function = [],
                              +                               _FunctionDefLibrary'gradient = []}
                              +
                              +instance Data.ProtoLens.Message FunctionDefLibrary where
                              +        descriptor
                              +          = let function__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "function"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor FunctionDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked function)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDefLibrary
                              +                gradient__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "gradient"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor GradientDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked gradient)
                              +                      :: Data.ProtoLens.FieldDescriptor FunctionDefLibrary
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.FunctionDefLibrary")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, function__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, gradient__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("function", function__field_descriptor),
                              +                    ("gradient", gradient__field_descriptor)])
                              +
                              +data GradientDef = GradientDef{_GradientDef'functionName ::
                              +                               !Data.Text.Text,
                              +                               _GradientDef'gradientFunc :: !Data.Text.Text}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "functionName" f GradientDef GradientDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GradientDef'functionName
                              +                 (\ x__ y__ -> x__{_GradientDef'functionName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "gradientFunc" f GradientDef GradientDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GradientDef'gradientFunc
                              +                 (\ x__ y__ -> x__{_GradientDef'gradientFunc = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default GradientDef where
                              +        def
                              +          = GradientDef{_GradientDef'functionName =
                              +                          Data.ProtoLens.fieldDefault,
                              +                        _GradientDef'gradientFunc = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message GradientDef where
                              +        descriptor
                              +          = let functionName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "function_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional functionName)
                              +                      :: Data.ProtoLens.FieldDescriptor GradientDef
                              +                gradientFunc__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "gradient_func"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional gradientFunc)
                              +                      :: Data.ProtoLens.FieldDescriptor GradientDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.GradientDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, functionName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, gradientFunc__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("function_name", functionName__field_descriptor),
                              +                    ("gradient_func", gradientFunc__field_descriptor)])
                              +
                              +attr ::
                              +     forall f s t a b . (Lens.Labels.HasLens "attr" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +attr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "attr")
                              +
                              +function ::
                              +         forall f s t a b . (Lens.Labels.HasLens "function" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +function
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "function")
                              +
                              +functionName ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "functionName" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +functionName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "functionName")
                              +
                              +gradient ::
                              +         forall f s t a b . (Lens.Labels.HasLens "gradient" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +gradient
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "gradient")
                              +
                              +gradientFunc ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "gradientFunc" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +gradientFunc
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "gradientFunc")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +maybe'signature ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'signature" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'signature
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'signature")
                              +
                              +maybe'value ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'value" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'value")
                              +
                              +nodeDef ::
                              +        forall f s t a b . (Lens.Labels.HasLens "nodeDef" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +nodeDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "nodeDef")
                              +
                              +ret ::
                              +    forall f s t a b . (Lens.Labels.HasLens "ret" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +ret
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "ret")
                              +
                              +signature ::
                              +          forall f s t a b . (Lens.Labels.HasLens "signature" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +signature
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "signature")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Graph.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Graph.html new file mode 100644 index 0000000..cff9d93 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Graph.html @@ -0,0 +1,206 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/graph.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.Graph where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.Function
                              +import qualified Proto.Tensorflow.Core.Framework.NodeDef
                              +import qualified Proto.Tensorflow.Core.Framework.Versions
                              +
                              +data GraphDef = GraphDef{_GraphDef'node ::
                              +                         ![Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
                              +                         _GraphDef'versions ::
                              +                         !(Prelude.Maybe
                              +                             Proto.Tensorflow.Core.Framework.Versions.VersionDef),
                              +                         _GraphDef'version :: !Data.Int.Int32,
                              +                         _GraphDef'library ::
                              +                         !(Prelude.Maybe
                              +                             Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary)}
                              +              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
                              +          b ~ [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "node" f GraphDef GraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphDef'node
                              +                 (\ x__ y__ -> x__{_GraphDef'node = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          b ~ Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "versions" f GraphDef GraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphDef'versions
                              +                 (\ x__ y__ -> x__{_GraphDef'versions = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'versions" f GraphDef GraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphDef'versions
                              +                 (\ x__ y__ -> x__{_GraphDef'versions = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "version" f GraphDef GraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphDef'version
                              +                 (\ x__ y__ -> x__{_GraphDef'version = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary,
                              +          b ~ Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "library" f GraphDef GraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphDef'library
                              +                 (\ x__ y__ -> x__{_GraphDef'library = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'library" f GraphDef GraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphDef'library
                              +                 (\ x__ y__ -> x__{_GraphDef'library = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default GraphDef where
                              +        def
                              +          = GraphDef{_GraphDef'node = [],
                              +                     _GraphDef'versions = Prelude.Nothing,
                              +                     _GraphDef'version = Data.ProtoLens.fieldDefault,
                              +                     _GraphDef'library = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message GraphDef where
                              +        descriptor
                              +          = let node__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "node"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.NodeDef.NodeDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked node)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphDef
                              +                versions__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "versions"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Versions.VersionDef)
                              +                      (Data.ProtoLens.OptionalField maybe'versions)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphDef
                              +                version__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "version"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional version)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphDef
                              +                library__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "library"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary)
                              +                      (Data.ProtoLens.OptionalField maybe'library)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.GraphDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, node__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, versions__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, version__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, library__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("node", node__field_descriptor),
                              +                    ("versions", versions__field_descriptor),
                              +                    ("version", version__field_descriptor),
                              +                    ("library", library__field_descriptor)])
                              +
                              +library ::
                              +        forall f s t a b . (Lens.Labels.HasLens "library" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +library
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "library")
                              +
                              +maybe'library ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'library" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'library
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'library")
                              +
                              +maybe'versions ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'versions" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'versions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'versions")
                              +
                              +node ::
                              +     forall f s t a b . (Lens.Labels.HasLens "node" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +node
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "node")
                              +
                              +version ::
                              +        forall f s t a b . (Lens.Labels.HasLens "version" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +version
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "version")
                              +
                              +versions ::
                              +         forall f s t a b . (Lens.Labels.HasLens "versions" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +versions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "versions")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.KernelDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.KernelDef.html new file mode 100644 index 0000000..243ca8e --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.KernelDef.html @@ -0,0 +1,284 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/kernel_def.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.KernelDef where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.AttrValue
                              +
                              +data KernelDef = KernelDef{_KernelDef'op :: !Data.Text.Text,
                              +                           _KernelDef'deviceType :: !Data.Text.Text,
                              +                           _KernelDef'constraint :: ![KernelDef'AttrConstraint],
                              +                           _KernelDef'hostMemoryArg :: ![Data.Text.Text],
                              +                           _KernelDef'label :: !Data.Text.Text}
                              +               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "op" f KernelDef KernelDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _KernelDef'op
                              +                 (\ x__ y__ -> x__{_KernelDef'op = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deviceType" f KernelDef KernelDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _KernelDef'deviceType
                              +                 (\ x__ y__ -> x__{_KernelDef'deviceType = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [KernelDef'AttrConstraint],
                              +          b ~ [KernelDef'AttrConstraint], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "constraint" f KernelDef KernelDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _KernelDef'constraint
                              +                 (\ x__ y__ -> x__{_KernelDef'constraint = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hostMemoryArg" f KernelDef KernelDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _KernelDef'hostMemoryArg
                              +                 (\ x__ y__ -> x__{_KernelDef'hostMemoryArg = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "label" f KernelDef KernelDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _KernelDef'label
                              +                 (\ x__ y__ -> x__{_KernelDef'label = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default KernelDef where
                              +        def
                              +          = KernelDef{_KernelDef'op = Data.ProtoLens.fieldDefault,
                              +                      _KernelDef'deviceType = Data.ProtoLens.fieldDefault,
                              +                      _KernelDef'constraint = [], _KernelDef'hostMemoryArg = [],
                              +                      _KernelDef'label = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message KernelDef where
                              +        descriptor
                              +          = let op__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "op"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional op)
                              +                      :: Data.ProtoLens.FieldDescriptor KernelDef
                              +                deviceType__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_type"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional deviceType)
                              +                      :: Data.ProtoLens.FieldDescriptor KernelDef
                              +                constraint__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "constraint"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor KernelDef'AttrConstraint)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked constraint)
                              +                      :: Data.ProtoLens.FieldDescriptor KernelDef
                              +                hostMemoryArg__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "host_memory_arg"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         hostMemoryArg)
                              +                      :: Data.ProtoLens.FieldDescriptor KernelDef
                              +                label__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "label"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional label)
                              +                      :: Data.ProtoLens.FieldDescriptor KernelDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.KernelDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, op__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, deviceType__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, constraint__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, hostMemoryArg__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, label__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("op", op__field_descriptor),
                              +                    ("device_type", deviceType__field_descriptor),
                              +                    ("constraint", constraint__field_descriptor),
                              +                    ("host_memory_arg", hostMemoryArg__field_descriptor),
                              +                    ("label", label__field_descriptor)])
                              +
                              +data KernelDef'AttrConstraint = KernelDef'AttrConstraint{_KernelDef'AttrConstraint'name
                              +                                                         :: !Data.Text.Text,
                              +                                                         _KernelDef'AttrConstraint'allowedValues ::
                              +                                                         !(Prelude.Maybe
                              +                                                             Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)}
                              +                              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f KernelDef'AttrConstraint
                              +           KernelDef'AttrConstraint
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _KernelDef'AttrConstraint'name
                              +                 (\ x__ y__ -> x__{_KernelDef'AttrConstraint'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allowedValues" f KernelDef'AttrConstraint
                              +           KernelDef'AttrConstraint
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _KernelDef'AttrConstraint'allowedValues
                              +                 (\ x__ y__ -> x__{_KernelDef'AttrConstraint'allowedValues = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'allowedValues" f
                              +           KernelDef'AttrConstraint
                              +           KernelDef'AttrConstraint
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _KernelDef'AttrConstraint'allowedValues
                              +                 (\ x__ y__ -> x__{_KernelDef'AttrConstraint'allowedValues = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default KernelDef'AttrConstraint where
                              +        def
                              +          = KernelDef'AttrConstraint{_KernelDef'AttrConstraint'name =
                              +                                       Data.ProtoLens.fieldDefault,
                              +                                     _KernelDef'AttrConstraint'allowedValues = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message KernelDef'AttrConstraint where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor KernelDef'AttrConstraint
                              +                allowedValues__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allowed_values"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
                              +                      (Data.ProtoLens.OptionalField maybe'allowedValues)
                              +                      :: Data.ProtoLens.FieldDescriptor KernelDef'AttrConstraint
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.KernelDef.AttrConstraint")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, allowedValues__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("allowed_values", allowedValues__field_descriptor)])
                              +
                              +allowedValues ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "allowedValues" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +allowedValues
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allowedValues")
                              +
                              +constraint ::
                              +           forall f s t a b . (Lens.Labels.HasLens "constraint" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +constraint
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "constraint")
                              +
                              +deviceType ::
                              +           forall f s t a b . (Lens.Labels.HasLens "deviceType" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +deviceType
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "deviceType")
                              +
                              +hostMemoryArg ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "hostMemoryArg" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +hostMemoryArg
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "hostMemoryArg")
                              +
                              +label ::
                              +      forall f s t a b . (Lens.Labels.HasLens "label" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +label
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "label")
                              +
                              +maybe'allowedValues ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "maybe'allowedValues" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +maybe'allowedValues
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'allowedValues")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +op ::
                              +   forall f s t a b . (Lens.Labels.HasLens "op" f s t a b) =>
                              +     Lens.Family2.LensLike f s t a b
                              +op
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "op")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.LogMemory.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.LogMemory.html new file mode 100644 index 0000000..1db883c --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.LogMemory.html @@ -0,0 +1,777 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/log_memory.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.LogMemory where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.TensorDescription
                              +
                              +data MemoryLogRawAllocation = MemoryLogRawAllocation{_MemoryLogRawAllocation'stepId
                              +                                                     :: !Data.Int.Int64,
                              +                                                     _MemoryLogRawAllocation'operation ::
                              +                                                     !Data.Text.Text,
                              +                                                     _MemoryLogRawAllocation'numBytes ::
                              +                                                     !Data.Int.Int64,
                              +                                                     _MemoryLogRawAllocation'ptr ::
                              +                                                     !Data.Word.Word64,
                              +                                                     _MemoryLogRawAllocation'allocationId ::
                              +                                                     !Data.Int.Int64,
                              +                                                     _MemoryLogRawAllocation'allocatorName ::
                              +                                                     !Data.Text.Text}
                              +                            deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "stepId" f MemoryLogRawAllocation
                              +           MemoryLogRawAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawAllocation'stepId
                              +                 (\ x__ y__ -> x__{_MemoryLogRawAllocation'stepId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "operation" f MemoryLogRawAllocation
                              +           MemoryLogRawAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawAllocation'operation
                              +                 (\ x__ y__ -> x__{_MemoryLogRawAllocation'operation = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "numBytes" f MemoryLogRawAllocation
                              +           MemoryLogRawAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawAllocation'numBytes
                              +                 (\ x__ y__ -> x__{_MemoryLogRawAllocation'numBytes = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Word.Word64, b ~ Data.Word.Word64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "ptr" f MemoryLogRawAllocation
                              +           MemoryLogRawAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawAllocation'ptr
                              +                 (\ x__ y__ -> x__{_MemoryLogRawAllocation'ptr = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocationId" f MemoryLogRawAllocation
                              +           MemoryLogRawAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawAllocation'allocationId
                              +                 (\ x__ y__ -> x__{_MemoryLogRawAllocation'allocationId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocatorName" f MemoryLogRawAllocation
                              +           MemoryLogRawAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawAllocation'allocatorName
                              +                 (\ x__ y__ -> x__{_MemoryLogRawAllocation'allocatorName = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemoryLogRawAllocation where
                              +        def
                              +          = MemoryLogRawAllocation{_MemoryLogRawAllocation'stepId =
                              +                                     Data.ProtoLens.fieldDefault,
                              +                                   _MemoryLogRawAllocation'operation = Data.ProtoLens.fieldDefault,
                              +                                   _MemoryLogRawAllocation'numBytes = Data.ProtoLens.fieldDefault,
                              +                                   _MemoryLogRawAllocation'ptr = Data.ProtoLens.fieldDefault,
                              +                                   _MemoryLogRawAllocation'allocationId =
                              +                                     Data.ProtoLens.fieldDefault,
                              +                                   _MemoryLogRawAllocation'allocatorName =
                              +                                     Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message MemoryLogRawAllocation where
                              +        descriptor
                              +          = let stepId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "step_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional stepId)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawAllocation
                              +                operation__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "operation"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional operation)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawAllocation
                              +                numBytes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "num_bytes"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numBytes)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawAllocation
                              +                ptr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "ptr"
                              +                      (Data.ProtoLens.UInt64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional ptr)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawAllocation
                              +                allocationId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocation_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocationId)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawAllocation
                              +                allocatorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocator_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawAllocation
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemoryLogRawAllocation")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, stepId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, operation__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, numBytes__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, ptr__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, allocationId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, allocatorName__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("step_id", stepId__field_descriptor),
                              +                    ("operation", operation__field_descriptor),
                              +                    ("num_bytes", numBytes__field_descriptor),
                              +                    ("ptr", ptr__field_descriptor),
                              +                    ("allocation_id", allocationId__field_descriptor),
                              +                    ("allocator_name", allocatorName__field_descriptor)])
                              +
                              +data MemoryLogRawDeallocation = MemoryLogRawDeallocation{_MemoryLogRawDeallocation'stepId
                              +                                                         :: !Data.Int.Int64,
                              +                                                         _MemoryLogRawDeallocation'operation ::
                              +                                                         !Data.Text.Text,
                              +                                                         _MemoryLogRawDeallocation'allocationId ::
                              +                                                         !Data.Int.Int64,
                              +                                                         _MemoryLogRawDeallocation'allocatorName ::
                              +                                                         !Data.Text.Text,
                              +                                                         _MemoryLogRawDeallocation'deferred ::
                              +                                                         !Prelude.Bool}
                              +                              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "stepId" f MemoryLogRawDeallocation
                              +           MemoryLogRawDeallocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawDeallocation'stepId
                              +                 (\ x__ y__ -> x__{_MemoryLogRawDeallocation'stepId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "operation" f MemoryLogRawDeallocation
                              +           MemoryLogRawDeallocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawDeallocation'operation
                              +                 (\ x__ y__ -> x__{_MemoryLogRawDeallocation'operation = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocationId" f MemoryLogRawDeallocation
                              +           MemoryLogRawDeallocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawDeallocation'allocationId
                              +                 (\ x__ y__ -> x__{_MemoryLogRawDeallocation'allocationId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocatorName" f MemoryLogRawDeallocation
                              +           MemoryLogRawDeallocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MemoryLogRawDeallocation'allocatorName
                              +                 (\ x__ y__ -> x__{_MemoryLogRawDeallocation'allocatorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deferred" f MemoryLogRawDeallocation
                              +           MemoryLogRawDeallocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogRawDeallocation'deferred
                              +                 (\ x__ y__ -> x__{_MemoryLogRawDeallocation'deferred = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemoryLogRawDeallocation where
                              +        def
                              +          = MemoryLogRawDeallocation{_MemoryLogRawDeallocation'stepId =
                              +                                       Data.ProtoLens.fieldDefault,
                              +                                     _MemoryLogRawDeallocation'operation =
                              +                                       Data.ProtoLens.fieldDefault,
                              +                                     _MemoryLogRawDeallocation'allocationId =
                              +                                       Data.ProtoLens.fieldDefault,
                              +                                     _MemoryLogRawDeallocation'allocatorName =
                              +                                       Data.ProtoLens.fieldDefault,
                              +                                     _MemoryLogRawDeallocation'deferred =
                              +                                       Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message MemoryLogRawDeallocation where
                              +        descriptor
                              +          = let stepId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "step_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional stepId)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawDeallocation
                              +                operation__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "operation"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional operation)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawDeallocation
                              +                allocationId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocation_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocationId)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawDeallocation
                              +                allocatorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocator_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawDeallocation
                              +                deferred__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "deferred"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional deferred)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogRawDeallocation
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemoryLogRawDeallocation")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, stepId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, operation__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, allocationId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, allocatorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, deferred__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("step_id", stepId__field_descriptor),
                              +                    ("operation", operation__field_descriptor),
                              +                    ("allocation_id", allocationId__field_descriptor),
                              +                    ("allocator_name", allocatorName__field_descriptor),
                              +                    ("deferred", deferred__field_descriptor)])
                              +
                              +data MemoryLogStep = MemoryLogStep{_MemoryLogStep'stepId ::
                              +                                   !Data.Int.Int64,
                              +                                   _MemoryLogStep'handle :: !Data.Text.Text}
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "stepId" f MemoryLogStep MemoryLogStep a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogStep'stepId
                              +                 (\ x__ y__ -> x__{_MemoryLogStep'stepId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "handle" f MemoryLogStep MemoryLogStep a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogStep'handle
                              +                 (\ x__ y__ -> x__{_MemoryLogStep'handle = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemoryLogStep where
                              +        def
                              +          = MemoryLogStep{_MemoryLogStep'stepId =
                              +                            Data.ProtoLens.fieldDefault,
                              +                          _MemoryLogStep'handle = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message MemoryLogStep where
                              +        descriptor
                              +          = let stepId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "step_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional stepId)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogStep
                              +                handle__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "handle"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional handle)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogStep
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemoryLogStep")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, stepId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, handle__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("step_id", stepId__field_descriptor),
                              +                    ("handle", handle__field_descriptor)])
                              +
                              +data MemoryLogTensorAllocation = MemoryLogTensorAllocation{_MemoryLogTensorAllocation'stepId
                              +                                                           :: !Data.Int.Int64,
                              +                                                           _MemoryLogTensorAllocation'kernelName ::
                              +                                                           !Data.Text.Text,
                              +                                                           _MemoryLogTensorAllocation'tensor ::
                              +                                                           !(Prelude.Maybe
                              +                                                               Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription)}
                              +                               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "stepId" f MemoryLogTensorAllocation
                              +           MemoryLogTensorAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorAllocation'stepId
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorAllocation'stepId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "kernelName" f MemoryLogTensorAllocation
                              +           MemoryLogTensorAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorAllocation'kernelName
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorAllocation'kernelName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          b ~
                              +            Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensor" f MemoryLogTensorAllocation
                              +           MemoryLogTensorAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorAllocation'tensor
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorAllocation'tensor = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensor" f MemoryLogTensorAllocation
                              +           MemoryLogTensorAllocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorAllocation'tensor
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorAllocation'tensor = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemoryLogTensorAllocation where
                              +        def
                              +          = MemoryLogTensorAllocation{_MemoryLogTensorAllocation'stepId =
                              +                                        Data.ProtoLens.fieldDefault,
                              +                                      _MemoryLogTensorAllocation'kernelName =
                              +                                        Data.ProtoLens.fieldDefault,
                              +                                      _MemoryLogTensorAllocation'tensor = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message MemoryLogTensorAllocation where
                              +        descriptor
                              +          = let stepId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "step_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional stepId)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorAllocation
                              +                kernelName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "kernel_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional kernelName)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorAllocation
                              +                tensor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription)
                              +                      (Data.ProtoLens.OptionalField maybe'tensor)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorAllocation
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemoryLogTensorAllocation")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, stepId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, kernelName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, tensor__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("step_id", stepId__field_descriptor),
                              +                    ("kernel_name", kernelName__field_descriptor),
                              +                    ("tensor", tensor__field_descriptor)])
                              +
                              +data MemoryLogTensorDeallocation = MemoryLogTensorDeallocation{_MemoryLogTensorDeallocation'allocationId
                              +                                                               :: !Data.Int.Int64,
                              +                                                               _MemoryLogTensorDeallocation'allocatorName
                              +                                                               :: !Data.Text.Text}
                              +                                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocationId" f MemoryLogTensorDeallocation
                              +           MemoryLogTensorDeallocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MemoryLogTensorDeallocation'allocationId
                              +                 (\ x__ y__ ->
                              +                    x__{_MemoryLogTensorDeallocation'allocationId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocatorName" f MemoryLogTensorDeallocation
                              +           MemoryLogTensorDeallocation
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MemoryLogTensorDeallocation'allocatorName
                              +                 (\ x__ y__ ->
                              +                    x__{_MemoryLogTensorDeallocation'allocatorName = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemoryLogTensorDeallocation
                              +         where
                              +        def
                              +          = MemoryLogTensorDeallocation{_MemoryLogTensorDeallocation'allocationId
                              +                                          = Data.ProtoLens.fieldDefault,
                              +                                        _MemoryLogTensorDeallocation'allocatorName =
                              +                                          Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message MemoryLogTensorDeallocation where
                              +        descriptor
                              +          = let allocationId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocation_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocationId)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorDeallocation
                              +                allocatorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocator_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorDeallocation
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemoryLogTensorDeallocation")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, allocationId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, allocatorName__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("allocation_id", allocationId__field_descriptor),
                              +                    ("allocator_name", allocatorName__field_descriptor)])
                              +
                              +data MemoryLogTensorOutput = MemoryLogTensorOutput{_MemoryLogTensorOutput'stepId
                              +                                                   :: !Data.Int.Int64,
                              +                                                   _MemoryLogTensorOutput'kernelName ::
                              +                                                   !Data.Text.Text,
                              +                                                   _MemoryLogTensorOutput'index :: !Data.Int.Int32,
                              +                                                   _MemoryLogTensorOutput'tensor ::
                              +                                                   !(Prelude.Maybe
                              +                                                       Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription)}
                              +                           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "stepId" f MemoryLogTensorOutput
                              +           MemoryLogTensorOutput
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorOutput'stepId
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorOutput'stepId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "kernelName" f MemoryLogTensorOutput
                              +           MemoryLogTensorOutput
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorOutput'kernelName
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorOutput'kernelName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "index" f MemoryLogTensorOutput
                              +           MemoryLogTensorOutput
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorOutput'index
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorOutput'index = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          b ~
                              +            Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensor" f MemoryLogTensorOutput
                              +           MemoryLogTensorOutput
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorOutput'tensor
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorOutput'tensor = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensor" f MemoryLogTensorOutput
                              +           MemoryLogTensorOutput
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryLogTensorOutput'tensor
                              +                 (\ x__ y__ -> x__{_MemoryLogTensorOutput'tensor = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemoryLogTensorOutput where
                              +        def
                              +          = MemoryLogTensorOutput{_MemoryLogTensorOutput'stepId =
                              +                                    Data.ProtoLens.fieldDefault,
                              +                                  _MemoryLogTensorOutput'kernelName = Data.ProtoLens.fieldDefault,
                              +                                  _MemoryLogTensorOutput'index = Data.ProtoLens.fieldDefault,
                              +                                  _MemoryLogTensorOutput'tensor = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message MemoryLogTensorOutput where
                              +        descriptor
                              +          = let stepId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "step_id"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional stepId)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorOutput
                              +                kernelName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "kernel_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional kernelName)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorOutput
                              +                index__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "index"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional index)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorOutput
                              +                tensor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription)
                              +                      (Data.ProtoLens.OptionalField maybe'tensor)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryLogTensorOutput
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemoryLogTensorOutput")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, stepId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, kernelName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, index__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, tensor__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("step_id", stepId__field_descriptor),
                              +                    ("kernel_name", kernelName__field_descriptor),
                              +                    ("index", index__field_descriptor),
                              +                    ("tensor", tensor__field_descriptor)])
                              +
                              +allocationId ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "allocationId" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +allocationId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allocationId")
                              +
                              +allocatorName ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "allocatorName" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +allocatorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allocatorName")
                              +
                              +deferred ::
                              +         forall f s t a b . (Lens.Labels.HasLens "deferred" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +deferred
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "deferred")
                              +
                              +handle ::
                              +       forall f s t a b . (Lens.Labels.HasLens "handle" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +handle
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "handle")
                              +
                              +index ::
                              +      forall f s t a b . (Lens.Labels.HasLens "index" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +index
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "index")
                              +
                              +kernelName ::
                              +           forall f s t a b . (Lens.Labels.HasLens "kernelName" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +kernelName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "kernelName")
                              +
                              +maybe'tensor ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "maybe'tensor" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +maybe'tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'tensor")
                              +
                              +numBytes ::
                              +         forall f s t a b . (Lens.Labels.HasLens "numBytes" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +numBytes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "numBytes")
                              +
                              +operation ::
                              +          forall f s t a b . (Lens.Labels.HasLens "operation" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +operation
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "operation")
                              +
                              +ptr ::
                              +    forall f s t a b . (Lens.Labels.HasLens "ptr" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +ptr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "ptr")
                              +
                              +stepId ::
                              +       forall f s t a b . (Lens.Labels.HasLens "stepId" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +stepId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "stepId")
                              +
                              +tensor ::
                              +       forall f s t a b . (Lens.Labels.HasLens "tensor" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensor")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.NodeDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.NodeDef.html new file mode 100644 index 0000000..db57c24 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.NodeDef.html @@ -0,0 +1,277 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/node_def.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.NodeDef where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.AttrValue
                              +
                              +data NodeDef = NodeDef{_NodeDef'name :: !Data.Text.Text,
                              +                       _NodeDef'op :: !Data.Text.Text,
                              +                       _NodeDef'input :: ![Data.Text.Text],
                              +                       _NodeDef'device :: !Data.Text.Text,
                              +                       _NodeDef'attr ::
                              +                       !(Data.Map.Map Data.Text.Text
                              +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)}
                              +             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f NodeDef NodeDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeDef'name
                              +                 (\ x__ y__ -> x__{_NodeDef'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "op" f NodeDef NodeDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeDef'op
                              +                 (\ x__ y__ -> x__{_NodeDef'op = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "input" f NodeDef NodeDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeDef'input
                              +                 (\ x__ y__ -> x__{_NodeDef'input = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "device" f NodeDef NodeDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeDef'device
                              +                 (\ x__ y__ -> x__{_NodeDef'device = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Data.Map.Map Data.Text.Text
                              +              Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~
                              +            Data.Map.Map Data.Text.Text
                              +              Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "attr" f NodeDef NodeDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeDef'attr
                              +                 (\ x__ y__ -> x__{_NodeDef'attr = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default NodeDef where
                              +        def
                              +          = NodeDef{_NodeDef'name = Data.ProtoLens.fieldDefault,
                              +                    _NodeDef'op = Data.ProtoLens.fieldDefault, _NodeDef'input = [],
                              +                    _NodeDef'device = Data.ProtoLens.fieldDefault,
                              +                    _NodeDef'attr = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message NodeDef where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeDef
                              +                op__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "op"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional op)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeDef
                              +                input__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "input"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked input)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeDef
                              +                device__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeDef
                              +                attr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "attr"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor NodeDef'AttrEntry)
                              +                      (Data.ProtoLens.MapField key value attr)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.NodeDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, op__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, input__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, device__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, attr__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor), ("op", op__field_descriptor),
                              +                    ("input", input__field_descriptor),
                              +                    ("device", device__field_descriptor),
                              +                    ("attr", attr__field_descriptor)])
                              +
                              +data NodeDef'AttrEntry = NodeDef'AttrEntry{_NodeDef'AttrEntry'key
                              +                                           :: !Data.Text.Text,
                              +                                           _NodeDef'AttrEntry'value ::
                              +                                           !(Prelude.Maybe
                              +                                               Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)}
                              +                       deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f NodeDef'AttrEntry NodeDef'AttrEntry a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeDef'AttrEntry'key
                              +                 (\ x__ y__ -> x__{_NodeDef'AttrEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f NodeDef'AttrEntry NodeDef'AttrEntry a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeDef'AttrEntry'value
                              +                 (\ x__ y__ -> x__{_NodeDef'AttrEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f NodeDef'AttrEntry
                              +           NodeDef'AttrEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeDef'AttrEntry'value
                              +                 (\ x__ y__ -> x__{_NodeDef'AttrEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default NodeDef'AttrEntry where
                              +        def
                              +          = NodeDef'AttrEntry{_NodeDef'AttrEntry'key =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _NodeDef'AttrEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message NodeDef'AttrEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeDef'AttrEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeDef'AttrEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.NodeDef.AttrEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +attr ::
                              +     forall f s t a b . (Lens.Labels.HasLens "attr" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +attr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "attr")
                              +
                              +device ::
                              +       forall f s t a b . (Lens.Labels.HasLens "device" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +device
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "device")
                              +
                              +input ::
                              +      forall f s t a b . (Lens.Labels.HasLens "input" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +input
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "input")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +maybe'value ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'value" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'value")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +op ::
                              +   forall f s t a b . (Lens.Labels.HasLens "op" f s t a b) =>
                              +     Lens.Family2.LensLike f s t a b
                              +op
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "op")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.OpDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.OpDef.html new file mode 100644 index 0000000..173a235 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.OpDef.html @@ -0,0 +1,895 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/op_def.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.OpDef where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.AttrValue
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +
                              +data OpDef = OpDef{_OpDef'name :: !Data.Text.Text,
                              +                   _OpDef'inputArg :: ![OpDef'ArgDef],
                              +                   _OpDef'outputArg :: ![OpDef'ArgDef],
                              +                   _OpDef'attr :: ![OpDef'AttrDef],
                              +                   _OpDef'deprecation :: !(Prelude.Maybe OpDeprecation),
                              +                   _OpDef'summary :: !Data.Text.Text,
                              +                   _OpDef'description :: !Data.Text.Text,
                              +                   _OpDef'isCommutative :: !Prelude.Bool,
                              +                   _OpDef'isAggregate :: !Prelude.Bool,
                              +                   _OpDef'isStateful :: !Prelude.Bool,
                              +                   _OpDef'allowsUninitializedInput :: !Prelude.Bool}
                              +           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'name
                              +                 (\ x__ y__ -> x__{_OpDef'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [OpDef'ArgDef], b ~ [OpDef'ArgDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "inputArg" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'inputArg
                              +                 (\ x__ y__ -> x__{_OpDef'inputArg = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [OpDef'ArgDef], b ~ [OpDef'ArgDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "outputArg" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'outputArg
                              +                 (\ x__ y__ -> x__{_OpDef'outputArg = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [OpDef'AttrDef], b ~ [OpDef'AttrDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "attr" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'attr
                              +                 (\ x__ y__ -> x__{_OpDef'attr = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ OpDeprecation, b ~ OpDeprecation,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deprecation" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'deprecation
                              +                 (\ x__ y__ -> x__{_OpDef'deprecation = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe OpDeprecation,
                              +          b ~ Prelude.Maybe OpDeprecation, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'deprecation" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'deprecation
                              +                 (\ x__ y__ -> x__{_OpDef'deprecation = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "summary" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'summary
                              +                 (\ x__ y__ -> x__{_OpDef'summary = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "description" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'description
                              +                 (\ x__ y__ -> x__{_OpDef'description = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "isCommutative" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'isCommutative
                              +                 (\ x__ y__ -> x__{_OpDef'isCommutative = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "isAggregate" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'isAggregate
                              +                 (\ x__ y__ -> x__{_OpDef'isAggregate = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "isStateful" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'isStateful
                              +                 (\ x__ y__ -> x__{_OpDef'isStateful = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allowsUninitializedInput" f OpDef OpDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'allowsUninitializedInput
                              +                 (\ x__ y__ -> x__{_OpDef'allowsUninitializedInput = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default OpDef where
                              +        def
                              +          = OpDef{_OpDef'name = Data.ProtoLens.fieldDefault,
                              +                  _OpDef'inputArg = [], _OpDef'outputArg = [], _OpDef'attr = [],
                              +                  _OpDef'deprecation = Prelude.Nothing,
                              +                  _OpDef'summary = Data.ProtoLens.fieldDefault,
                              +                  _OpDef'description = Data.ProtoLens.fieldDefault,
                              +                  _OpDef'isCommutative = Data.ProtoLens.fieldDefault,
                              +                  _OpDef'isAggregate = Data.ProtoLens.fieldDefault,
                              +                  _OpDef'isStateful = Data.ProtoLens.fieldDefault,
                              +                  _OpDef'allowsUninitializedInput = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message OpDef where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                inputArg__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "input_arg"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor OpDef'ArgDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked inputArg)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                outputArg__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "output_arg"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor OpDef'ArgDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked outputArg)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                attr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "attr"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor OpDef'AttrDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked attr)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                deprecation__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "deprecation"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor OpDeprecation)
                              +                      (Data.ProtoLens.OptionalField maybe'deprecation)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                summary__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "summary"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional summary)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                description__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "description"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                isCommutative__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "is_commutative"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isCommutative)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                isAggregate__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "is_aggregate"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isAggregate)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                isStateful__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "is_stateful"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isStateful)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +                allowsUninitializedInput__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allows_uninitialized_input"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         allowsUninitializedInput)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.OpDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, inputArg__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, outputArg__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, attr__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, deprecation__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, summary__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, description__field_descriptor),
                              +                    (Data.ProtoLens.Tag 18, isCommutative__field_descriptor),
                              +                    (Data.ProtoLens.Tag 16, isAggregate__field_descriptor),
                              +                    (Data.ProtoLens.Tag 17, isStateful__field_descriptor),
                              +                    (Data.ProtoLens.Tag 19,
                              +                     allowsUninitializedInput__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("input_arg", inputArg__field_descriptor),
                              +                    ("output_arg", outputArg__field_descriptor),
                              +                    ("attr", attr__field_descriptor),
                              +                    ("deprecation", deprecation__field_descriptor),
                              +                    ("summary", summary__field_descriptor),
                              +                    ("description", description__field_descriptor),
                              +                    ("is_commutative", isCommutative__field_descriptor),
                              +                    ("is_aggregate", isAggregate__field_descriptor),
                              +                    ("is_stateful", isStateful__field_descriptor),
                              +                    ("allows_uninitialized_input",
                              +                     allowsUninitializedInput__field_descriptor)])
                              +
                              +data OpDef'ArgDef = OpDef'ArgDef{_OpDef'ArgDef'name ::
                              +                                 !Data.Text.Text,
                              +                                 _OpDef'ArgDef'description :: !Data.Text.Text,
                              +                                 _OpDef'ArgDef'type' ::
                              +                                 !Proto.Tensorflow.Core.Framework.Types.DataType,
                              +                                 _OpDef'ArgDef'typeAttr :: !Data.Text.Text,
                              +                                 _OpDef'ArgDef'numberAttr :: !Data.Text.Text,
                              +                                 _OpDef'ArgDef'typeListAttr :: !Data.Text.Text,
                              +                                 _OpDef'ArgDef'isRef :: !Prelude.Bool}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f OpDef'ArgDef OpDef'ArgDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'ArgDef'name
                              +                 (\ x__ y__ -> x__{_OpDef'ArgDef'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "description" f OpDef'ArgDef OpDef'ArgDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'ArgDef'description
                              +                 (\ x__ y__ -> x__{_OpDef'ArgDef'description = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "type'" f OpDef'ArgDef OpDef'ArgDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'ArgDef'type'
                              +                 (\ x__ y__ -> x__{_OpDef'ArgDef'type' = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "typeAttr" f OpDef'ArgDef OpDef'ArgDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'ArgDef'typeAttr
                              +                 (\ x__ y__ -> x__{_OpDef'ArgDef'typeAttr = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "numberAttr" f OpDef'ArgDef OpDef'ArgDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'ArgDef'numberAttr
                              +                 (\ x__ y__ -> x__{_OpDef'ArgDef'numberAttr = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "typeListAttr" f OpDef'ArgDef OpDef'ArgDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'ArgDef'typeListAttr
                              +                 (\ x__ y__ -> x__{_OpDef'ArgDef'typeListAttr = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "isRef" f OpDef'ArgDef OpDef'ArgDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'ArgDef'isRef
                              +                 (\ x__ y__ -> x__{_OpDef'ArgDef'isRef = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default OpDef'ArgDef where
                              +        def
                              +          = OpDef'ArgDef{_OpDef'ArgDef'name = Data.ProtoLens.fieldDefault,
                              +                         _OpDef'ArgDef'description = Data.ProtoLens.fieldDefault,
                              +                         _OpDef'ArgDef'type' = Data.Default.Class.def,
                              +                         _OpDef'ArgDef'typeAttr = Data.ProtoLens.fieldDefault,
                              +                         _OpDef'ArgDef'numberAttr = Data.ProtoLens.fieldDefault,
                              +                         _OpDef'ArgDef'typeListAttr = Data.ProtoLens.fieldDefault,
                              +                         _OpDef'ArgDef'isRef = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message OpDef'ArgDef where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'ArgDef
                              +                description__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "description"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'ArgDef
                              +                type'__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional type')
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'ArgDef
                              +                typeAttr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type_attr"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional typeAttr)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'ArgDef
                              +                numberAttr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "number_attr"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numberAttr)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'ArgDef
                              +                typeListAttr__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type_list_attr"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional typeListAttr)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'ArgDef
                              +                isRef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "is_ref"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isRef)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'ArgDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.OpDef.ArgDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, description__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, type'__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, typeAttr__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, numberAttr__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, typeListAttr__field_descriptor),
                              +                    (Data.ProtoLens.Tag 16, isRef__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("description", description__field_descriptor),
                              +                    ("type", type'__field_descriptor),
                              +                    ("type_attr", typeAttr__field_descriptor),
                              +                    ("number_attr", numberAttr__field_descriptor),
                              +                    ("type_list_attr", typeListAttr__field_descriptor),
                              +                    ("is_ref", isRef__field_descriptor)])
                              +
                              +data OpDef'AttrDef = OpDef'AttrDef{_OpDef'AttrDef'name ::
                              +                                   !Data.Text.Text,
                              +                                   _OpDef'AttrDef'type' :: !Data.Text.Text,
                              +                                   _OpDef'AttrDef'defaultValue ::
                              +                                   !(Prelude.Maybe
                              +                                       Proto.Tensorflow.Core.Framework.AttrValue.AttrValue),
                              +                                   _OpDef'AttrDef'description :: !Data.Text.Text,
                              +                                   _OpDef'AttrDef'hasMinimum :: !Prelude.Bool,
                              +                                   _OpDef'AttrDef'minimum :: !Data.Int.Int64,
                              +                                   _OpDef'AttrDef'allowedValues ::
                              +                                   !(Prelude.Maybe
                              +                                       Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)}
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f OpDef'AttrDef OpDef'AttrDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'name
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "type'" f OpDef'AttrDef OpDef'AttrDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'type'
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'type' = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "defaultValue" f OpDef'AttrDef OpDef'AttrDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'defaultValue
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'defaultValue = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'defaultValue" f OpDef'AttrDef
                              +           OpDef'AttrDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'defaultValue
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'defaultValue = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "description" f OpDef'AttrDef OpDef'AttrDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'description
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'description = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hasMinimum" f OpDef'AttrDef OpDef'AttrDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'hasMinimum
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'hasMinimum = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "minimum" f OpDef'AttrDef OpDef'AttrDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'minimum
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'minimum = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~ Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allowedValues" f OpDef'AttrDef OpDef'AttrDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'allowedValues
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'allowedValues = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'allowedValues" f OpDef'AttrDef
                              +           OpDef'AttrDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDef'AttrDef'allowedValues
                              +                 (\ x__ y__ -> x__{_OpDef'AttrDef'allowedValues = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default OpDef'AttrDef where
                              +        def
                              +          = OpDef'AttrDef{_OpDef'AttrDef'name = Data.ProtoLens.fieldDefault,
                              +                          _OpDef'AttrDef'type' = Data.ProtoLens.fieldDefault,
                              +                          _OpDef'AttrDef'defaultValue = Prelude.Nothing,
                              +                          _OpDef'AttrDef'description = Data.ProtoLens.fieldDefault,
                              +                          _OpDef'AttrDef'hasMinimum = Data.ProtoLens.fieldDefault,
                              +                          _OpDef'AttrDef'minimum = Data.ProtoLens.fieldDefault,
                              +                          _OpDef'AttrDef'allowedValues = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message OpDef'AttrDef where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'AttrDef
                              +                type'__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional type')
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'AttrDef
                              +                defaultValue__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "default_value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
                              +                      (Data.ProtoLens.OptionalField maybe'defaultValue)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'AttrDef
                              +                description__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "description"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'AttrDef
                              +                hasMinimum__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "has_minimum"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional hasMinimum)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'AttrDef
                              +                minimum__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "minimum"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional minimum)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'AttrDef
                              +                allowedValues__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allowed_values"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
                              +                      (Data.ProtoLens.OptionalField maybe'allowedValues)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDef'AttrDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.OpDef.AttrDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, type'__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, defaultValue__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, description__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, hasMinimum__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, minimum__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, allowedValues__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("type", type'__field_descriptor),
                              +                    ("default_value", defaultValue__field_descriptor),
                              +                    ("description", description__field_descriptor),
                              +                    ("has_minimum", hasMinimum__field_descriptor),
                              +                    ("minimum", minimum__field_descriptor),
                              +                    ("allowed_values", allowedValues__field_descriptor)])
                              +
                              +data OpDeprecation = OpDeprecation{_OpDeprecation'version ::
                              +                                   !Data.Int.Int32,
                              +                                   _OpDeprecation'explanation :: !Data.Text.Text}
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "version" f OpDeprecation OpDeprecation a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDeprecation'version
                              +                 (\ x__ y__ -> x__{_OpDeprecation'version = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "explanation" f OpDeprecation OpDeprecation a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpDeprecation'explanation
                              +                 (\ x__ y__ -> x__{_OpDeprecation'explanation = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default OpDeprecation where
                              +        def
                              +          = OpDeprecation{_OpDeprecation'version =
                              +                            Data.ProtoLens.fieldDefault,
                              +                          _OpDeprecation'explanation = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message OpDeprecation where
                              +        descriptor
                              +          = let version__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "version"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional version)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDeprecation
                              +                explanation__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "explanation"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional explanation)
                              +                      :: Data.ProtoLens.FieldDescriptor OpDeprecation
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.OpDeprecation")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, version__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, explanation__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("version", version__field_descriptor),
                              +                    ("explanation", explanation__field_descriptor)])
                              +
                              +data OpList = OpList{_OpList'op :: ![OpDef]}
                              +            deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [OpDef], b ~ [OpDef], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "op" f OpList OpList a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OpList'op
                              +                 (\ x__ y__ -> x__{_OpList'op = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default OpList where
                              +        def = OpList{_OpList'op = []}
                              +
                              +instance Data.ProtoLens.Message OpList where
                              +        descriptor
                              +          = let op__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "op"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor OpDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked op)
                              +                      :: Data.ProtoLens.FieldDescriptor OpList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.OpList")
                              +                (Data.Map.fromList [(Data.ProtoLens.Tag 1, op__field_descriptor)])
                              +                (Data.Map.fromList [("op", op__field_descriptor)])
                              +
                              +allowedValues ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "allowedValues" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +allowedValues
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allowedValues")
                              +
                              +allowsUninitializedInput ::
                              +                         forall f s t a b .
                              +                           (Lens.Labels.HasLens "allowsUninitializedInput" f s t a b) =>
                              +                           Lens.Family2.LensLike f s t a b
                              +allowsUninitializedInput
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "allowsUninitializedInput")
                              +
                              +attr ::
                              +     forall f s t a b . (Lens.Labels.HasLens "attr" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +attr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "attr")
                              +
                              +defaultValue ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "defaultValue" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +defaultValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "defaultValue")
                              +
                              +deprecation ::
                              +            forall f s t a b . (Lens.Labels.HasLens "deprecation" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +deprecation
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "deprecation")
                              +
                              +description ::
                              +            forall f s t a b . (Lens.Labels.HasLens "description" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +description
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "description")
                              +
                              +explanation ::
                              +            forall f s t a b . (Lens.Labels.HasLens "explanation" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +explanation
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "explanation")
                              +
                              +hasMinimum ::
                              +           forall f s t a b . (Lens.Labels.HasLens "hasMinimum" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +hasMinimum
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "hasMinimum")
                              +
                              +inputArg ::
                              +         forall f s t a b . (Lens.Labels.HasLens "inputArg" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +inputArg
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "inputArg")
                              +
                              +isAggregate ::
                              +            forall f s t a b . (Lens.Labels.HasLens "isAggregate" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +isAggregate
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "isAggregate")
                              +
                              +isCommutative ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "isCommutative" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +isCommutative
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "isCommutative")
                              +
                              +isRef ::
                              +      forall f s t a b . (Lens.Labels.HasLens "isRef" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +isRef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "isRef")
                              +
                              +isStateful ::
                              +           forall f s t a b . (Lens.Labels.HasLens "isStateful" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +isStateful
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "isStateful")
                              +
                              +maybe'allowedValues ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "maybe'allowedValues" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +maybe'allowedValues
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'allowedValues")
                              +
                              +maybe'defaultValue ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "maybe'defaultValue" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +maybe'defaultValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'defaultValue")
                              +
                              +maybe'deprecation ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'deprecation" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'deprecation
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'deprecation")
                              +
                              +minimum ::
                              +        forall f s t a b . (Lens.Labels.HasLens "minimum" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +minimum
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "minimum")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +numberAttr ::
                              +           forall f s t a b . (Lens.Labels.HasLens "numberAttr" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +numberAttr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "numberAttr")
                              +
                              +op ::
                              +   forall f s t a b . (Lens.Labels.HasLens "op" f s t a b) =>
                              +     Lens.Family2.LensLike f s t a b
                              +op
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "op")
                              +
                              +outputArg ::
                              +          forall f s t a b . (Lens.Labels.HasLens "outputArg" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +outputArg
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "outputArg")
                              +
                              +summary ::
                              +        forall f s t a b . (Lens.Labels.HasLens "summary" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +summary
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "summary")
                              +
                              +type' ::
                              +      forall f s t a b . (Lens.Labels.HasLens "type'" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +type'
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "type'")
                              +
                              +typeAttr ::
                              +         forall f s t a b . (Lens.Labels.HasLens "typeAttr" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +typeAttr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "typeAttr")
                              +
                              +typeListAttr ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "typeListAttr" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +typeListAttr
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "typeListAttr")
                              +
                              +version ::
                              +        forall f s t a b . (Lens.Labels.HasLens "version" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +version
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "version")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.ResourceHandle.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.ResourceHandle.html new file mode 100644 index 0000000..f403a55 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.ResourceHandle.html @@ -0,0 +1,193 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/resource_handle.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.ResourceHandle where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data ResourceHandleProto = ResourceHandleProto{_ResourceHandleProto'device
                              +                                               :: !Data.Text.Text,
                              +                                               _ResourceHandleProto'container :: !Data.Text.Text,
                              +                                               _ResourceHandleProto'name :: !Data.Text.Text,
                              +                                               _ResourceHandleProto'hashCode :: !Data.Word.Word64,
                              +                                               _ResourceHandleProto'maybeTypeName ::
                              +                                               !Data.Text.Text}
                              +                         deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "device" f ResourceHandleProto
                              +           ResourceHandleProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ResourceHandleProto'device
                              +                 (\ x__ y__ -> x__{_ResourceHandleProto'device = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "container" f ResourceHandleProto
                              +           ResourceHandleProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ResourceHandleProto'container
                              +                 (\ x__ y__ -> x__{_ResourceHandleProto'container = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f ResourceHandleProto
                              +           ResourceHandleProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ResourceHandleProto'name
                              +                 (\ x__ y__ -> x__{_ResourceHandleProto'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Word.Word64, b ~ Data.Word.Word64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hashCode" f ResourceHandleProto
                              +           ResourceHandleProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ResourceHandleProto'hashCode
                              +                 (\ x__ y__ -> x__{_ResourceHandleProto'hashCode = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybeTypeName" f ResourceHandleProto
                              +           ResourceHandleProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ResourceHandleProto'maybeTypeName
                              +                 (\ x__ y__ -> x__{_ResourceHandleProto'maybeTypeName = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ResourceHandleProto where
                              +        def
                              +          = ResourceHandleProto{_ResourceHandleProto'device =
                              +                                  Data.ProtoLens.fieldDefault,
                              +                                _ResourceHandleProto'container = Data.ProtoLens.fieldDefault,
                              +                                _ResourceHandleProto'name = Data.ProtoLens.fieldDefault,
                              +                                _ResourceHandleProto'hashCode = Data.ProtoLens.fieldDefault,
                              +                                _ResourceHandleProto'maybeTypeName = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message ResourceHandleProto where
                              +        descriptor
                              +          = let device__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
                              +                      :: Data.ProtoLens.FieldDescriptor ResourceHandleProto
                              +                container__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "container"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional container)
                              +                      :: Data.ProtoLens.FieldDescriptor ResourceHandleProto
                              +                name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor ResourceHandleProto
                              +                hashCode__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "hash_code"
                              +                      (Data.ProtoLens.UInt64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional hashCode)
                              +                      :: Data.ProtoLens.FieldDescriptor ResourceHandleProto
                              +                maybeTypeName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "maybe_type_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional maybeTypeName)
                              +                      :: Data.ProtoLens.FieldDescriptor ResourceHandleProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ResourceHandleProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, device__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, container__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, hashCode__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, maybeTypeName__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("device", device__field_descriptor),
                              +                    ("container", container__field_descriptor),
                              +                    ("name", name__field_descriptor),
                              +                    ("hash_code", hashCode__field_descriptor),
                              +                    ("maybe_type_name", maybeTypeName__field_descriptor)])
                              +
                              +container ::
                              +          forall f s t a b . (Lens.Labels.HasLens "container" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +container
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "container")
                              +
                              +device ::
                              +       forall f s t a b . (Lens.Labels.HasLens "device" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +device
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "device")
                              +
                              +hashCode ::
                              +         forall f s t a b . (Lens.Labels.HasLens "hashCode" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +hashCode
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "hashCode")
                              +
                              +maybeTypeName ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybeTypeName" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybeTypeName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybeTypeName")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.StepStats.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.StepStats.html new file mode 100644 index 0000000..2a057f0 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.StepStats.html @@ -0,0 +1,1024 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/step_stats.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.StepStats where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified
                              +       Proto.Tensorflow.Core.Framework.AllocationDescription
                              +import qualified Proto.Tensorflow.Core.Framework.TensorDescription
                              +
                              +data AllocatorMemoryUsed = AllocatorMemoryUsed{_AllocatorMemoryUsed'allocatorName
                              +                                               :: !Data.Text.Text,
                              +                                               _AllocatorMemoryUsed'totalBytes :: !Data.Int.Int64,
                              +                                               _AllocatorMemoryUsed'peakBytes :: !Data.Int.Int64,
                              +                                               _AllocatorMemoryUsed'liveBytes :: !Data.Int.Int64,
                              +                                               _AllocatorMemoryUsed'allocatorBytesInUse ::
                              +                                               !Data.Int.Int64}
                              +                         deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocatorName" f AllocatorMemoryUsed
                              +           AllocatorMemoryUsed
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'allocatorName
                              +                 (\ x__ y__ -> x__{_AllocatorMemoryUsed'allocatorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "totalBytes" f AllocatorMemoryUsed
                              +           AllocatorMemoryUsed
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'totalBytes
                              +                 (\ x__ y__ -> x__{_AllocatorMemoryUsed'totalBytes = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "peakBytes" f AllocatorMemoryUsed
                              +           AllocatorMemoryUsed
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'peakBytes
                              +                 (\ x__ y__ -> x__{_AllocatorMemoryUsed'peakBytes = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "liveBytes" f AllocatorMemoryUsed
                              +           AllocatorMemoryUsed
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'liveBytes
                              +                 (\ x__ y__ -> x__{_AllocatorMemoryUsed'liveBytes = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocatorBytesInUse" f AllocatorMemoryUsed
                              +           AllocatorMemoryUsed
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _AllocatorMemoryUsed'allocatorBytesInUse
                              +                 (\ x__ y__ -> x__{_AllocatorMemoryUsed'allocatorBytesInUse = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default AllocatorMemoryUsed where
                              +        def
                              +          = AllocatorMemoryUsed{_AllocatorMemoryUsed'allocatorName =
                              +                                  Data.ProtoLens.fieldDefault,
                              +                                _AllocatorMemoryUsed'totalBytes = Data.ProtoLens.fieldDefault,
                              +                                _AllocatorMemoryUsed'peakBytes = Data.ProtoLens.fieldDefault,
                              +                                _AllocatorMemoryUsed'liveBytes = Data.ProtoLens.fieldDefault,
                              +                                _AllocatorMemoryUsed'allocatorBytesInUse =
                              +                                  Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message AllocatorMemoryUsed where
                              +        descriptor
                              +          = let allocatorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocator_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocatorMemoryUsed
                              +                totalBytes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "total_bytes"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional totalBytes)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocatorMemoryUsed
                              +                peakBytes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "peak_bytes"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional peakBytes)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocatorMemoryUsed
                              +                liveBytes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "live_bytes"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional liveBytes)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocatorMemoryUsed
                              +                allocatorBytesInUse__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocator_bytes_in_use"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         allocatorBytesInUse)
                              +                      :: Data.ProtoLens.FieldDescriptor AllocatorMemoryUsed
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.AllocatorMemoryUsed")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, allocatorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, totalBytes__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, peakBytes__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, liveBytes__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, allocatorBytesInUse__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("allocator_name", allocatorName__field_descriptor),
                              +                    ("total_bytes", totalBytes__field_descriptor),
                              +                    ("peak_bytes", peakBytes__field_descriptor),
                              +                    ("live_bytes", liveBytes__field_descriptor),
                              +                    ("allocator_bytes_in_use", allocatorBytesInUse__field_descriptor)])
                              +
                              +data DeviceStepStats = DeviceStepStats{_DeviceStepStats'device ::
                              +                                       !Data.Text.Text,
                              +                                       _DeviceStepStats'nodeStats :: ![NodeExecStats]}
                              +                     deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "device" f DeviceStepStats DeviceStepStats a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceStepStats'device
                              +                 (\ x__ y__ -> x__{_DeviceStepStats'device = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [NodeExecStats], b ~ [NodeExecStats],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "nodeStats" f DeviceStepStats DeviceStepStats a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DeviceStepStats'nodeStats
                              +                 (\ x__ y__ -> x__{_DeviceStepStats'nodeStats = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default DeviceStepStats where
                              +        def
                              +          = DeviceStepStats{_DeviceStepStats'device =
                              +                              Data.ProtoLens.fieldDefault,
                              +                            _DeviceStepStats'nodeStats = []}
                              +
                              +instance Data.ProtoLens.Message DeviceStepStats where
                              +        descriptor
                              +          = let device__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceStepStats
                              +                nodeStats__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "node_stats"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor NodeExecStats)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked nodeStats)
                              +                      :: Data.ProtoLens.FieldDescriptor DeviceStepStats
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.DeviceStepStats")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, device__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, nodeStats__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("device", device__field_descriptor),
                              +                    ("node_stats", nodeStats__field_descriptor)])
                              +
                              +data MemoryStats = MemoryStats{_MemoryStats'hostTempMemorySize ::
                              +                               !Data.Int.Int64,
                              +                               _MemoryStats'deviceTempMemorySize :: !Data.Int.Int64,
                              +                               _MemoryStats'hostPersistentMemorySize :: !Data.Int.Int64,
                              +                               _MemoryStats'devicePersistentMemorySize :: !Data.Int.Int64,
                              +                               _MemoryStats'hostPersistentTensorAllocIds :: ![Data.Int.Int64],
                              +                               _MemoryStats'devicePersistentTensorAllocIds :: ![Data.Int.Int64]}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hostTempMemorySize" f MemoryStats MemoryStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryStats'hostTempMemorySize
                              +                 (\ x__ y__ -> x__{_MemoryStats'hostTempMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deviceTempMemorySize" f MemoryStats
                              +           MemoryStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryStats'deviceTempMemorySize
                              +                 (\ x__ y__ -> x__{_MemoryStats'deviceTempMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hostPersistentMemorySize" f MemoryStats
                              +           MemoryStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryStats'hostPersistentMemorySize
                              +                 (\ x__ y__ -> x__{_MemoryStats'hostPersistentMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "devicePersistentMemorySize" f MemoryStats
                              +           MemoryStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MemoryStats'devicePersistentMemorySize
                              +                 (\ x__ y__ -> x__{_MemoryStats'devicePersistentMemorySize = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hostPersistentTensorAllocIds" f MemoryStats
                              +           MemoryStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MemoryStats'hostPersistentTensorAllocIds
                              +                 (\ x__ y__ ->
                              +                    x__{_MemoryStats'hostPersistentTensorAllocIds = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "devicePersistentTensorAllocIds" f MemoryStats
                              +           MemoryStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MemoryStats'devicePersistentTensorAllocIds
                              +                 (\ x__ y__ ->
                              +                    x__{_MemoryStats'devicePersistentTensorAllocIds = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemoryStats where
                              +        def
                              +          = MemoryStats{_MemoryStats'hostTempMemorySize =
                              +                          Data.ProtoLens.fieldDefault,
                              +                        _MemoryStats'deviceTempMemorySize = Data.ProtoLens.fieldDefault,
                              +                        _MemoryStats'hostPersistentMemorySize =
                              +                          Data.ProtoLens.fieldDefault,
                              +                        _MemoryStats'devicePersistentMemorySize =
                              +                          Data.ProtoLens.fieldDefault,
                              +                        _MemoryStats'hostPersistentTensorAllocIds = [],
                              +                        _MemoryStats'devicePersistentTensorAllocIds = []}
                              +
                              +instance Data.ProtoLens.Message MemoryStats where
                              +        descriptor
                              +          = let hostTempMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "host_temp_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         hostTempMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryStats
                              +                deviceTempMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_temp_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         deviceTempMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryStats
                              +                hostPersistentMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "host_persistent_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         hostPersistentMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryStats
                              +                devicePersistentMemorySize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_persistent_memory_size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         devicePersistentMemorySize)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryStats
                              +                hostPersistentTensorAllocIds__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "host_persistent_tensor_alloc_ids"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed
                              +                         hostPersistentTensorAllocIds)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryStats
                              +                devicePersistentTensorAllocIds__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor
                              +                      "device_persistent_tensor_alloc_ids"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed
                              +                         devicePersistentTensorAllocIds)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryStats
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemoryStats")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, hostTempMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, deviceTempMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, hostPersistentMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4,
                              +                     devicePersistentMemorySize__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5,
                              +                     hostPersistentTensorAllocIds__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6,
                              +                     devicePersistentTensorAllocIds__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("host_temp_memory_size", hostTempMemorySize__field_descriptor),
                              +                    ("device_temp_memory_size",
                              +                     deviceTempMemorySize__field_descriptor),
                              +                    ("host_persistent_memory_size",
                              +                     hostPersistentMemorySize__field_descriptor),
                              +                    ("device_persistent_memory_size",
                              +                     devicePersistentMemorySize__field_descriptor),
                              +                    ("host_persistent_tensor_alloc_ids",
                              +                     hostPersistentTensorAllocIds__field_descriptor),
                              +                    ("device_persistent_tensor_alloc_ids",
                              +                     devicePersistentTensorAllocIds__field_descriptor)])
                              +
                              +data NodeExecStats = NodeExecStats{_NodeExecStats'nodeName ::
                              +                                   !Data.Text.Text,
                              +                                   _NodeExecStats'allStartMicros :: !Data.Int.Int64,
                              +                                   _NodeExecStats'opStartRelMicros :: !Data.Int.Int64,
                              +                                   _NodeExecStats'opEndRelMicros :: !Data.Int.Int64,
                              +                                   _NodeExecStats'allEndRelMicros :: !Data.Int.Int64,
                              +                                   _NodeExecStats'memory :: ![AllocatorMemoryUsed],
                              +                                   _NodeExecStats'output :: ![NodeOutput],
                              +                                   _NodeExecStats'timelineLabel :: !Data.Text.Text,
                              +                                   _NodeExecStats'scheduledMicros :: !Data.Int.Int64,
                              +                                   _NodeExecStats'threadId :: !Data.Word.Word32,
                              +                                   _NodeExecStats'referencedTensor ::
                              +                                   ![Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription],
                              +                                   _NodeExecStats'memoryStats :: !(Prelude.Maybe MemoryStats)}
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "nodeName" f NodeExecStats NodeExecStats a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'nodeName
                              +                 (\ x__ y__ -> x__{_NodeExecStats'nodeName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allStartMicros" f NodeExecStats NodeExecStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'allStartMicros
                              +                 (\ x__ y__ -> x__{_NodeExecStats'allStartMicros = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "opStartRelMicros" f NodeExecStats
                              +           NodeExecStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'opStartRelMicros
                              +                 (\ x__ y__ -> x__{_NodeExecStats'opStartRelMicros = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "opEndRelMicros" f NodeExecStats NodeExecStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'opEndRelMicros
                              +                 (\ x__ y__ -> x__{_NodeExecStats'opEndRelMicros = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allEndRelMicros" f NodeExecStats NodeExecStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'allEndRelMicros
                              +                 (\ x__ y__ -> x__{_NodeExecStats'allEndRelMicros = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [AllocatorMemoryUsed], b ~ [AllocatorMemoryUsed],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "memory" f NodeExecStats NodeExecStats a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'memory
                              +                 (\ x__ y__ -> x__{_NodeExecStats'memory = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [NodeOutput], b ~ [NodeOutput], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "output" f NodeExecStats NodeExecStats a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'output
                              +                 (\ x__ y__ -> x__{_NodeExecStats'output = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "timelineLabel" f NodeExecStats NodeExecStats a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'timelineLabel
                              +                 (\ x__ y__ -> x__{_NodeExecStats'timelineLabel = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "scheduledMicros" f NodeExecStats NodeExecStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'scheduledMicros
                              +                 (\ x__ y__ -> x__{_NodeExecStats'scheduledMicros = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Word.Word32, b ~ Data.Word.Word32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "threadId" f NodeExecStats NodeExecStats a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'threadId
                              +                 (\ x__ y__ -> x__{_NodeExecStats'threadId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            [Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription],
                              +          b ~
                              +            [Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "referencedTensor" f NodeExecStats
                              +           NodeExecStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'referencedTensor
                              +                 (\ x__ y__ -> x__{_NodeExecStats'referencedTensor = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ MemoryStats, b ~ MemoryStats, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "memoryStats" f NodeExecStats NodeExecStats a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'memoryStats
                              +                 (\ x__ y__ -> x__{_NodeExecStats'memoryStats = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe MemoryStats,
                              +          b ~ Prelude.Maybe MemoryStats, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'memoryStats" f NodeExecStats
                              +           NodeExecStats
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeExecStats'memoryStats
                              +                 (\ x__ y__ -> x__{_NodeExecStats'memoryStats = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default NodeExecStats where
                              +        def
                              +          = NodeExecStats{_NodeExecStats'nodeName =
                              +                            Data.ProtoLens.fieldDefault,
                              +                          _NodeExecStats'allStartMicros = Data.ProtoLens.fieldDefault,
                              +                          _NodeExecStats'opStartRelMicros = Data.ProtoLens.fieldDefault,
                              +                          _NodeExecStats'opEndRelMicros = Data.ProtoLens.fieldDefault,
                              +                          _NodeExecStats'allEndRelMicros = Data.ProtoLens.fieldDefault,
                              +                          _NodeExecStats'memory = [], _NodeExecStats'output = [],
                              +                          _NodeExecStats'timelineLabel = Data.ProtoLens.fieldDefault,
                              +                          _NodeExecStats'scheduledMicros = Data.ProtoLens.fieldDefault,
                              +                          _NodeExecStats'threadId = Data.ProtoLens.fieldDefault,
                              +                          _NodeExecStats'referencedTensor = [],
                              +                          _NodeExecStats'memoryStats = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message NodeExecStats where
                              +        descriptor
                              +          = let nodeName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "node_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional nodeName)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                allStartMicros__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "all_start_micros"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allStartMicros)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                opStartRelMicros__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "op_start_rel_micros"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         opStartRelMicros)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                opEndRelMicros__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "op_end_rel_micros"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional opEndRelMicros)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                allEndRelMicros__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "all_end_rel_micros"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allEndRelMicros)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                memory__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "memory"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor AllocatorMemoryUsed)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked memory)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                output__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "output"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor NodeOutput)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked output)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                timelineLabel__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "timeline_label"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timelineLabel)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                scheduledMicros__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "scheduled_micros"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional scheduledMicros)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                threadId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "thread_id"
                              +                      (Data.ProtoLens.UInt32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional threadId)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                referencedTensor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "referenced_tensor"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         referencedTensor)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +                memoryStats__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "memory_stats"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor MemoryStats)
                              +                      (Data.ProtoLens.OptionalField maybe'memoryStats)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeExecStats
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.NodeExecStats")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, nodeName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, allStartMicros__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, opStartRelMicros__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, opEndRelMicros__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, allEndRelMicros__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, memory__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, output__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, timelineLabel__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, scheduledMicros__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, threadId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 11, referencedTensor__field_descriptor),
                              +                    (Data.ProtoLens.Tag 12, memoryStats__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("node_name", nodeName__field_descriptor),
                              +                    ("all_start_micros", allStartMicros__field_descriptor),
                              +                    ("op_start_rel_micros", opStartRelMicros__field_descriptor),
                              +                    ("op_end_rel_micros", opEndRelMicros__field_descriptor),
                              +                    ("all_end_rel_micros", allEndRelMicros__field_descriptor),
                              +                    ("memory", memory__field_descriptor),
                              +                    ("output", output__field_descriptor),
                              +                    ("timeline_label", timelineLabel__field_descriptor),
                              +                    ("scheduled_micros", scheduledMicros__field_descriptor),
                              +                    ("thread_id", threadId__field_descriptor),
                              +                    ("referenced_tensor", referencedTensor__field_descriptor),
                              +                    ("memory_stats", memoryStats__field_descriptor)])
                              +
                              +data NodeOutput = NodeOutput{_NodeOutput'slot :: !Data.Int.Int32,
                              +                             _NodeOutput'tensorDescription ::
                              +                             !(Prelude.Maybe
                              +                                 Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription)}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "slot" f NodeOutput NodeOutput a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeOutput'slot
                              +                 (\ x__ y__ -> x__{_NodeOutput'slot = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          b ~
                              +            Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensorDescription" f NodeOutput NodeOutput a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeOutput'tensorDescription
                              +                 (\ x__ y__ -> x__{_NodeOutput'tensorDescription = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensorDescription" f NodeOutput
                              +           NodeOutput
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NodeOutput'tensorDescription
                              +                 (\ x__ y__ -> x__{_NodeOutput'tensorDescription = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default NodeOutput where
                              +        def
                              +          = NodeOutput{_NodeOutput'slot = Data.ProtoLens.fieldDefault,
                              +                       _NodeOutput'tensorDescription = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message NodeOutput where
                              +        descriptor
                              +          = let slot__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "slot"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional slot)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeOutput
                              +                tensorDescription__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor_description"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription)
                              +                      (Data.ProtoLens.OptionalField maybe'tensorDescription)
                              +                      :: Data.ProtoLens.FieldDescriptor NodeOutput
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.NodeOutput")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, slot__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, tensorDescription__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("slot", slot__field_descriptor),
                              +                    ("tensor_description", tensorDescription__field_descriptor)])
                              +
                              +data StepStats = StepStats{_StepStats'devStats ::
                              +                           ![DeviceStepStats]}
                              +               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [DeviceStepStats], b ~ [DeviceStepStats],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "devStats" f StepStats StepStats a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _StepStats'devStats
                              +                 (\ x__ y__ -> x__{_StepStats'devStats = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default StepStats where
                              +        def = StepStats{_StepStats'devStats = []}
                              +
                              +instance Data.ProtoLens.Message StepStats where
                              +        descriptor
                              +          = let devStats__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dev_stats"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor DeviceStepStats)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked devStats)
                              +                      :: Data.ProtoLens.FieldDescriptor StepStats
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.StepStats")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, devStats__field_descriptor)])
                              +                (Data.Map.fromList [("dev_stats", devStats__field_descriptor)])
                              +
                              +allEndRelMicros ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "allEndRelMicros" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +allEndRelMicros
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allEndRelMicros")
                              +
                              +allStartMicros ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "allStartMicros" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +allStartMicros
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allStartMicros")
                              +
                              +allocatorBytesInUse ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "allocatorBytesInUse" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +allocatorBytesInUse
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "allocatorBytesInUse")
                              +
                              +allocatorName ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "allocatorName" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +allocatorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allocatorName")
                              +
                              +devStats ::
                              +         forall f s t a b . (Lens.Labels.HasLens "devStats" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +devStats
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "devStats")
                              +
                              +device ::
                              +       forall f s t a b . (Lens.Labels.HasLens "device" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +device
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "device")
                              +
                              +devicePersistentMemorySize ::
                              +                           forall f s t a b .
                              +                             (Lens.Labels.HasLens "devicePersistentMemorySize" f s t a b) =>
                              +                             Lens.Family2.LensLike f s t a b
                              +devicePersistentMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "devicePersistentMemorySize")
                              +
                              +devicePersistentTensorAllocIds ::
                              +                               forall f s t a b .
                              +                                 (Lens.Labels.HasLens "devicePersistentTensorAllocIds" f s t a b) =>
                              +                                 Lens.Family2.LensLike f s t a b
                              +devicePersistentTensorAllocIds
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "devicePersistentTensorAllocIds")
                              +
                              +deviceTempMemorySize ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "deviceTempMemorySize" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +deviceTempMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "deviceTempMemorySize")
                              +
                              +hostPersistentMemorySize ::
                              +                         forall f s t a b .
                              +                           (Lens.Labels.HasLens "hostPersistentMemorySize" f s t a b) =>
                              +                           Lens.Family2.LensLike f s t a b
                              +hostPersistentMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "hostPersistentMemorySize")
                              +
                              +hostPersistentTensorAllocIds ::
                              +                             forall f s t a b .
                              +                               (Lens.Labels.HasLens "hostPersistentTensorAllocIds" f s t a b) =>
                              +                               Lens.Family2.LensLike f s t a b
                              +hostPersistentTensorAllocIds
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "hostPersistentTensorAllocIds")
                              +
                              +hostTempMemorySize ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "hostTempMemorySize" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +hostTempMemorySize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "hostTempMemorySize")
                              +
                              +liveBytes ::
                              +          forall f s t a b . (Lens.Labels.HasLens "liveBytes" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +liveBytes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "liveBytes")
                              +
                              +maybe'memoryStats ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'memoryStats" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'memoryStats
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'memoryStats")
                              +
                              +maybe'tensorDescription ::
                              +                        forall f s t a b .
                              +                          (Lens.Labels.HasLens "maybe'tensorDescription" f s t a b) =>
                              +                          Lens.Family2.LensLike f s t a b
                              +maybe'tensorDescription
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'tensorDescription")
                              +
                              +memory ::
                              +       forall f s t a b . (Lens.Labels.HasLens "memory" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +memory
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "memory")
                              +
                              +memoryStats ::
                              +            forall f s t a b . (Lens.Labels.HasLens "memoryStats" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +memoryStats
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "memoryStats")
                              +
                              +nodeName ::
                              +         forall f s t a b . (Lens.Labels.HasLens "nodeName" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +nodeName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "nodeName")
                              +
                              +nodeStats ::
                              +          forall f s t a b . (Lens.Labels.HasLens "nodeStats" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +nodeStats
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "nodeStats")
                              +
                              +opEndRelMicros ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "opEndRelMicros" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +opEndRelMicros
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "opEndRelMicros")
                              +
                              +opStartRelMicros ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "opStartRelMicros" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +opStartRelMicros
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "opStartRelMicros")
                              +
                              +output ::
                              +       forall f s t a b . (Lens.Labels.HasLens "output" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +output
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "output")
                              +
                              +peakBytes ::
                              +          forall f s t a b . (Lens.Labels.HasLens "peakBytes" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +peakBytes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "peakBytes")
                              +
                              +referencedTensor ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "referencedTensor" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +referencedTensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "referencedTensor")
                              +
                              +scheduledMicros ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "scheduledMicros" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +scheduledMicros
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "scheduledMicros")
                              +
                              +slot ::
                              +     forall f s t a b . (Lens.Labels.HasLens "slot" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +slot
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "slot")
                              +
                              +tensorDescription ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "tensorDescription" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +tensorDescription
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensorDescription")
                              +
                              +threadId ::
                              +         forall f s t a b . (Lens.Labels.HasLens "threadId" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +threadId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "threadId")
                              +
                              +timelineLabel ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "timelineLabel" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +timelineLabel
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "timelineLabel")
                              +
                              +totalBytes ::
                              +           forall f s t a b . (Lens.Labels.HasLens "totalBytes" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +totalBytes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "totalBytes")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Summary.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Summary.html new file mode 100644 index 0000000..8588ca6 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Summary.html @@ -0,0 +1,1299 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/summary.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.Summary where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.Tensor
                              +
                              +data HistogramProto = HistogramProto{_HistogramProto'min ::
                              +                                     !Prelude.Double,
                              +                                     _HistogramProto'max :: !Prelude.Double,
                              +                                     _HistogramProto'num :: !Prelude.Double,
                              +                                     _HistogramProto'sum :: !Prelude.Double,
                              +                                     _HistogramProto'sumSquares :: !Prelude.Double,
                              +                                     _HistogramProto'bucketLimit :: ![Prelude.Double],
                              +                                     _HistogramProto'bucket :: ![Prelude.Double]}
                              +                    deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "min" f HistogramProto HistogramProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _HistogramProto'min
                              +                 (\ x__ y__ -> x__{_HistogramProto'min = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "max" f HistogramProto HistogramProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _HistogramProto'max
                              +                 (\ x__ y__ -> x__{_HistogramProto'max = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "num" f HistogramProto HistogramProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _HistogramProto'num
                              +                 (\ x__ y__ -> x__{_HistogramProto'num = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "sum" f HistogramProto HistogramProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _HistogramProto'sum
                              +                 (\ x__ y__ -> x__{_HistogramProto'sum = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "sumSquares" f HistogramProto HistogramProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _HistogramProto'sumSquares
                              +                 (\ x__ y__ -> x__{_HistogramProto'sumSquares = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Double], b ~ [Prelude.Double],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "bucketLimit" f HistogramProto HistogramProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _HistogramProto'bucketLimit
                              +                 (\ x__ y__ -> x__{_HistogramProto'bucketLimit = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Double], b ~ [Prelude.Double],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "bucket" f HistogramProto HistogramProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _HistogramProto'bucket
                              +                 (\ x__ y__ -> x__{_HistogramProto'bucket = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default HistogramProto where
                              +        def
                              +          = HistogramProto{_HistogramProto'min = Data.ProtoLens.fieldDefault,
                              +                           _HistogramProto'max = Data.ProtoLens.fieldDefault,
                              +                           _HistogramProto'num = Data.ProtoLens.fieldDefault,
                              +                           _HistogramProto'sum = Data.ProtoLens.fieldDefault,
                              +                           _HistogramProto'sumSquares = Data.ProtoLens.fieldDefault,
                              +                           _HistogramProto'bucketLimit = [], _HistogramProto'bucket = []}
                              +
                              +instance Data.ProtoLens.Message HistogramProto where
                              +        descriptor
                              +          = let min__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "min"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional min)
                              +                      :: Data.ProtoLens.FieldDescriptor HistogramProto
                              +                max__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "max"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional max)
                              +                      :: Data.ProtoLens.FieldDescriptor HistogramProto
                              +                num__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "num"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional num)
                              +                      :: Data.ProtoLens.FieldDescriptor HistogramProto
                              +                sum__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "sum"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional sum)
                              +                      :: Data.ProtoLens.FieldDescriptor HistogramProto
                              +                sumSquares__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "sum_squares"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional sumSquares)
                              +                      :: Data.ProtoLens.FieldDescriptor HistogramProto
                              +                bucketLimit__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bucket_limit"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed bucketLimit)
                              +                      :: Data.ProtoLens.FieldDescriptor HistogramProto
                              +                bucket__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bucket"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed bucket)
                              +                      :: Data.ProtoLens.FieldDescriptor HistogramProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.HistogramProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, min__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, max__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, num__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, sum__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, sumSquares__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, bucketLimit__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, bucket__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("min", min__field_descriptor), ("max", max__field_descriptor),
                              +                    ("num", num__field_descriptor), ("sum", sum__field_descriptor),
                              +                    ("sum_squares", sumSquares__field_descriptor),
                              +                    ("bucket_limit", bucketLimit__field_descriptor),
                              +                    ("bucket", bucket__field_descriptor)])
                              +
                              +data Summary = Summary{_Summary'value :: ![Summary'Value]}
                              +             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Summary'Value], b ~ [Summary'Value],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f Summary Summary a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'value
                              +                 (\ x__ y__ -> x__{_Summary'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default Summary where
                              +        def = Summary{_Summary'value = []}
                              +
                              +instance Data.ProtoLens.Message Summary where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Summary'Value)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked value)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Summary")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +data Summary'Audio = Summary'Audio{_Summary'Audio'sampleRate ::
                              +                                   !Prelude.Float,
                              +                                   _Summary'Audio'numChannels :: !Data.Int.Int64,
                              +                                   _Summary'Audio'lengthFrames :: !Data.Int.Int64,
                              +                                   _Summary'Audio'encodedAudioString :: !Data.ByteString.ByteString,
                              +                                   _Summary'Audio'contentType :: !Data.Text.Text}
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Float, b ~ Prelude.Float,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "sampleRate" f Summary'Audio Summary'Audio a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Audio'sampleRate
                              +                 (\ x__ y__ -> x__{_Summary'Audio'sampleRate = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "numChannels" f Summary'Audio Summary'Audio a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Audio'numChannels
                              +                 (\ x__ y__ -> x__{_Summary'Audio'numChannels = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "lengthFrames" f Summary'Audio Summary'Audio a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Audio'lengthFrames
                              +                 (\ x__ y__ -> x__{_Summary'Audio'lengthFrames = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.ByteString.ByteString,
                              +          b ~ Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "encodedAudioString" f Summary'Audio
                              +           Summary'Audio
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Audio'encodedAudioString
                              +                 (\ x__ y__ -> x__{_Summary'Audio'encodedAudioString = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "contentType" f Summary'Audio Summary'Audio a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Audio'contentType
                              +                 (\ x__ y__ -> x__{_Summary'Audio'contentType = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default Summary'Audio where
                              +        def
                              +          = Summary'Audio{_Summary'Audio'sampleRate =
                              +                            Data.ProtoLens.fieldDefault,
                              +                          _Summary'Audio'numChannels = Data.ProtoLens.fieldDefault,
                              +                          _Summary'Audio'lengthFrames = Data.ProtoLens.fieldDefault,
                              +                          _Summary'Audio'encodedAudioString = Data.ProtoLens.fieldDefault,
                              +                          _Summary'Audio'contentType = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message Summary'Audio where
                              +        descriptor
                              +          = let sampleRate__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "sample_rate"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional sampleRate)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Audio
                              +                numChannels__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "num_channels"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numChannels)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Audio
                              +                lengthFrames__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "length_frames"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional lengthFrames)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Audio
                              +                encodedAudioString__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "encoded_audio_string"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         encodedAudioString)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Audio
                              +                contentType__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "content_type"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional contentType)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Audio
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Summary.Audio")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, sampleRate__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, numChannels__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, lengthFrames__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, encodedAudioString__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, contentType__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("sample_rate", sampleRate__field_descriptor),
                              +                    ("num_channels", numChannels__field_descriptor),
                              +                    ("length_frames", lengthFrames__field_descriptor),
                              +                    ("encoded_audio_string", encodedAudioString__field_descriptor),
                              +                    ("content_type", contentType__field_descriptor)])
                              +
                              +data Summary'Image = Summary'Image{_Summary'Image'height ::
                              +                                   !Data.Int.Int32,
                              +                                   _Summary'Image'width :: !Data.Int.Int32,
                              +                                   _Summary'Image'colorspace :: !Data.Int.Int32,
                              +                                   _Summary'Image'encodedImageString :: !Data.ByteString.ByteString}
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "height" f Summary'Image Summary'Image a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Image'height
                              +                 (\ x__ y__ -> x__{_Summary'Image'height = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "width" f Summary'Image Summary'Image a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Image'width
                              +                 (\ x__ y__ -> x__{_Summary'Image'width = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "colorspace" f Summary'Image Summary'Image a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Image'colorspace
                              +                 (\ x__ y__ -> x__{_Summary'Image'colorspace = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.ByteString.ByteString,
                              +          b ~ Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "encodedImageString" f Summary'Image
                              +           Summary'Image
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Image'encodedImageString
                              +                 (\ x__ y__ -> x__{_Summary'Image'encodedImageString = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default Summary'Image where
                              +        def
                              +          = Summary'Image{_Summary'Image'height =
                              +                            Data.ProtoLens.fieldDefault,
                              +                          _Summary'Image'width = Data.ProtoLens.fieldDefault,
                              +                          _Summary'Image'colorspace = Data.ProtoLens.fieldDefault,
                              +                          _Summary'Image'encodedImageString = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message Summary'Image where
                              +        descriptor
                              +          = let height__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "height"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional height)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Image
                              +                width__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "width"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional width)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Image
                              +                colorspace__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "colorspace"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional colorspace)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Image
                              +                encodedImageString__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "encoded_image_string"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         encodedImageString)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Image
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Summary.Image")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, height__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, width__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, colorspace__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, encodedImageString__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("height", height__field_descriptor),
                              +                    ("width", width__field_descriptor),
                              +                    ("colorspace", colorspace__field_descriptor),
                              +                    ("encoded_image_string", encodedImageString__field_descriptor)])
                              +
                              +data Summary'Value = Summary'Value{_Summary'Value'nodeName ::
                              +                                   !Data.Text.Text,
                              +                                   _Summary'Value'tag :: !Data.Text.Text,
                              +                                   _Summary'Value'metadata :: !(Prelude.Maybe SummaryMetadata),
                              +                                   _Summary'Value'value :: !(Prelude.Maybe Summary'Value'Value)}
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data Summary'Value'Value = Summary'Value'SimpleValue !Prelude.Float
                              +                         | Summary'Value'ObsoleteOldStyleHistogram !Data.ByteString.ByteString
                              +                         | Summary'Value'Image !Summary'Image
                              +                         | Summary'Value'Histo !HistogramProto
                              +                         | Summary'Value'Audio !Summary'Audio
                              +                         | Summary'Value'Tensor !Proto.Tensorflow.Core.Framework.Tensor.TensorProto
                              +                         deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "nodeName" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'nodeName
                              +                 (\ x__ y__ -> x__{_Summary'Value'nodeName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tag" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'tag
                              +                 (\ x__ y__ -> x__{_Summary'Value'tag = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ SummaryMetadata, b ~ SummaryMetadata,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "metadata" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'metadata
                              +                 (\ x__ y__ -> x__{_Summary'Value'metadata = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe SummaryMetadata,
                              +          b ~ Prelude.Maybe SummaryMetadata, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'metadata" f Summary'Value Summary'Value
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'metadata
                              +                 (\ x__ y__ -> x__{_Summary'Value'metadata = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Summary'Value'Value,
                              +          b ~ Prelude.Maybe Summary'Value'Value, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Prelude.Float,
                              +          b ~ Prelude.Maybe Prelude.Float, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'simpleValue" f Summary'Value
                              +           Summary'Value
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Summary'Value'SimpleValue x__val) -> Prelude.Just
                              +                                                                             x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Summary'Value'SimpleValue y__))
                              +
                              +instance (a ~ Prelude.Float, b ~ Prelude.Float,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "simpleValue" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Summary'Value'SimpleValue x__val) -> Prelude.Just
                              +                                                                                x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Summary'Value'SimpleValue y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe Data.ByteString.ByteString,
                              +          b ~ Prelude.Maybe Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'obsoleteOldStyleHistogram" f
                              +           Summary'Value
                              +           Summary'Value
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just
                              +                          (Summary'Value'ObsoleteOldStyleHistogram x__val) -> Prelude.Just
                              +                                                                                x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ ->
                              +                    Prelude.fmap Summary'Value'ObsoleteOldStyleHistogram y__))
                              +
                              +instance (a ~ Data.ByteString.ByteString,
                              +          b ~ Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "obsoleteOldStyleHistogram" f Summary'Value
                              +           Summary'Value
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just
                              +                             (Summary'Value'ObsoleteOldStyleHistogram x__val) -> Prelude.Just
                              +                                                                                   x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ ->
                              +                       Prelude.fmap Summary'Value'ObsoleteOldStyleHistogram y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe Summary'Image,
                              +          b ~ Prelude.Maybe Summary'Image, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'image" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Summary'Value'Image x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Summary'Value'Image y__))
                              +
                              +instance (a ~ Summary'Image, b ~ Summary'Image,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "image" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Summary'Value'Image x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Summary'Value'Image y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe HistogramProto,
                              +          b ~ Prelude.Maybe HistogramProto, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'histo" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Summary'Value'Histo x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Summary'Value'Histo y__))
                              +
                              +instance (a ~ HistogramProto, b ~ HistogramProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "histo" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Summary'Value'Histo x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Summary'Value'Histo y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe Summary'Audio,
                              +          b ~ Prelude.Maybe Summary'Audio, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'audio" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Summary'Value'Audio x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Summary'Value'Audio y__))
                              +
                              +instance (a ~ Summary'Audio, b ~ Summary'Audio,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "audio" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Summary'Value'Audio x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Summary'Value'Audio y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensor" f Summary'Value Summary'Value a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Summary'Value'Tensor x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Summary'Value'Tensor y__))
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensor" f Summary'Value Summary'Value a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Summary'Value'value
                              +                 (\ x__ y__ -> x__{_Summary'Value'value = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Summary'Value'Tensor x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Summary'Value'Tensor y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance Data.Default.Class.Default Summary'Value where
                              +        def
                              +          = Summary'Value{_Summary'Value'nodeName =
                              +                            Data.ProtoLens.fieldDefault,
                              +                          _Summary'Value'tag = Data.ProtoLens.fieldDefault,
                              +                          _Summary'Value'metadata = Prelude.Nothing,
                              +                          _Summary'Value'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message Summary'Value where
                              +        descriptor
                              +          = let nodeName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "node_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional nodeName)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +                tag__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tag"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional tag)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +                metadata__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "metadata"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SummaryMetadata)
                              +                      (Data.ProtoLens.OptionalField maybe'metadata)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +                simpleValue__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "simple_value"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.OptionalField maybe'simpleValue)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +                obsoleteOldStyleHistogram__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "obsolete_old_style_histogram"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.OptionalField maybe'obsoleteOldStyleHistogram)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +                image__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "image"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Summary'Image)
                              +                      (Data.ProtoLens.OptionalField maybe'image)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +                histo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "histo"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor HistogramProto)
                              +                      (Data.ProtoLens.OptionalField maybe'histo)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +                audio__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "audio"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Summary'Audio)
                              +                      (Data.ProtoLens.OptionalField maybe'audio)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +                tensor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
                              +                      (Data.ProtoLens.OptionalField maybe'tensor)
                              +                      :: Data.ProtoLens.FieldDescriptor Summary'Value
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Summary.Value")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 7, nodeName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 1, tag__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, metadata__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, simpleValue__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3,
                              +                     obsoleteOldStyleHistogram__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, image__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, histo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, audio__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, tensor__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("node_name", nodeName__field_descriptor),
                              +                    ("tag", tag__field_descriptor),
                              +                    ("metadata", metadata__field_descriptor),
                              +                    ("simple_value", simpleValue__field_descriptor),
                              +                    ("obsolete_old_style_histogram",
                              +                     obsoleteOldStyleHistogram__field_descriptor),
                              +                    ("image", image__field_descriptor),
                              +                    ("histo", histo__field_descriptor),
                              +                    ("audio", audio__field_descriptor),
                              +                    ("tensor", tensor__field_descriptor)])
                              +
                              +data SummaryDescription = SummaryDescription{_SummaryDescription'typeHint
                              +                                             :: !Data.Text.Text}
                              +                        deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "typeHint" f SummaryDescription
                              +           SummaryDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SummaryDescription'typeHint
                              +                 (\ x__ y__ -> x__{_SummaryDescription'typeHint = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SummaryDescription where
                              +        def
                              +          = SummaryDescription{_SummaryDescription'typeHint =
                              +                                 Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message SummaryDescription where
                              +        descriptor
                              +          = let typeHint__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type_hint"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional typeHint)
                              +                      :: Data.ProtoLens.FieldDescriptor SummaryDescription
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SummaryDescription")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, typeHint__field_descriptor)])
                              +                (Data.Map.fromList [("type_hint", typeHint__field_descriptor)])
                              +
                              +data SummaryMetadata = SummaryMetadata{_SummaryMetadata'pluginData
                              +                                       :: !(Prelude.Maybe SummaryMetadata'PluginData),
                              +                                       _SummaryMetadata'displayName :: !Data.Text.Text,
                              +                                       _SummaryMetadata'summaryDescription :: !Data.Text.Text}
                              +                     deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ SummaryMetadata'PluginData,
                              +          b ~ SummaryMetadata'PluginData, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "pluginData" f SummaryMetadata SummaryMetadata
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SummaryMetadata'pluginData
                              +                 (\ x__ y__ -> x__{_SummaryMetadata'pluginData = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe SummaryMetadata'PluginData,
                              +          b ~ Prelude.Maybe SummaryMetadata'PluginData, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'pluginData" f SummaryMetadata
                              +           SummaryMetadata
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SummaryMetadata'pluginData
                              +                 (\ x__ y__ -> x__{_SummaryMetadata'pluginData = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "displayName" f SummaryMetadata SummaryMetadata
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SummaryMetadata'displayName
                              +                 (\ x__ y__ -> x__{_SummaryMetadata'displayName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "summaryDescription" f SummaryMetadata
                              +           SummaryMetadata
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SummaryMetadata'summaryDescription
                              +                 (\ x__ y__ -> x__{_SummaryMetadata'summaryDescription = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SummaryMetadata where
                              +        def
                              +          = SummaryMetadata{_SummaryMetadata'pluginData = Prelude.Nothing,
                              +                            _SummaryMetadata'displayName = Data.ProtoLens.fieldDefault,
                              +                            _SummaryMetadata'summaryDescription = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message SummaryMetadata where
                              +        descriptor
                              +          = let pluginData__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "plugin_data"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SummaryMetadata'PluginData)
                              +                      (Data.ProtoLens.OptionalField maybe'pluginData)
                              +                      :: Data.ProtoLens.FieldDescriptor SummaryMetadata
                              +                displayName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "display_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional displayName)
                              +                      :: Data.ProtoLens.FieldDescriptor SummaryMetadata
                              +                summaryDescription__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "summary_description"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         summaryDescription)
                              +                      :: Data.ProtoLens.FieldDescriptor SummaryMetadata
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SummaryMetadata")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, pluginData__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, displayName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, summaryDescription__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("plugin_data", pluginData__field_descriptor),
                              +                    ("display_name", displayName__field_descriptor),
                              +                    ("summary_description", summaryDescription__field_descriptor)])
                              +
                              +data SummaryMetadata'PluginData = SummaryMetadata'PluginData{_SummaryMetadata'PluginData'pluginName
                              +                                                             :: !Data.Text.Text,
                              +                                                             _SummaryMetadata'PluginData'content ::
                              +                                                             !Data.Text.Text}
                              +                                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "pluginName" f SummaryMetadata'PluginData
                              +           SummaryMetadata'PluginData
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SummaryMetadata'PluginData'pluginName
                              +                 (\ x__ y__ -> x__{_SummaryMetadata'PluginData'pluginName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "content" f SummaryMetadata'PluginData
                              +           SummaryMetadata'PluginData
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SummaryMetadata'PluginData'content
                              +                 (\ x__ y__ -> x__{_SummaryMetadata'PluginData'content = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SummaryMetadata'PluginData
                              +         where
                              +        def
                              +          = SummaryMetadata'PluginData{_SummaryMetadata'PluginData'pluginName
                              +                                         = Data.ProtoLens.fieldDefault,
                              +                                       _SummaryMetadata'PluginData'content =
                              +                                         Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message SummaryMetadata'PluginData where
                              +        descriptor
                              +          = let pluginName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "plugin_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional pluginName)
                              +                      :: Data.ProtoLens.FieldDescriptor SummaryMetadata'PluginData
                              +                content__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "content"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional content)
                              +                      :: Data.ProtoLens.FieldDescriptor SummaryMetadata'PluginData
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SummaryMetadata.PluginData")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, pluginName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, content__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("plugin_name", pluginName__field_descriptor),
                              +                    ("content", content__field_descriptor)])
                              +
                              +audio ::
                              +      forall f s t a b . (Lens.Labels.HasLens "audio" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +audio
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "audio")
                              +
                              +bucket ::
                              +       forall f s t a b . (Lens.Labels.HasLens "bucket" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +bucket
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "bucket")
                              +
                              +bucketLimit ::
                              +            forall f s t a b . (Lens.Labels.HasLens "bucketLimit" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +bucketLimit
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "bucketLimit")
                              +
                              +colorspace ::
                              +           forall f s t a b . (Lens.Labels.HasLens "colorspace" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +colorspace
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "colorspace")
                              +
                              +content ::
                              +        forall f s t a b . (Lens.Labels.HasLens "content" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +content
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "content")
                              +
                              +contentType ::
                              +            forall f s t a b . (Lens.Labels.HasLens "contentType" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +contentType
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "contentType")
                              +
                              +displayName ::
                              +            forall f s t a b . (Lens.Labels.HasLens "displayName" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +displayName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "displayName")
                              +
                              +encodedAudioString ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "encodedAudioString" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +encodedAudioString
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "encodedAudioString")
                              +
                              +encodedImageString ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "encodedImageString" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +encodedImageString
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "encodedImageString")
                              +
                              +height ::
                              +       forall f s t a b . (Lens.Labels.HasLens "height" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +height
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "height")
                              +
                              +histo ::
                              +      forall f s t a b . (Lens.Labels.HasLens "histo" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +histo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "histo")
                              +
                              +image ::
                              +      forall f s t a b . (Lens.Labels.HasLens "image" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +image
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "image")
                              +
                              +lengthFrames ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "lengthFrames" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +lengthFrames
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "lengthFrames")
                              +
                              +max ::
                              +    forall f s t a b . (Lens.Labels.HasLens "max" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +max
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "max")
                              +
                              +maybe'audio ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'audio" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'audio
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'audio")
                              +
                              +maybe'histo ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'histo" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'histo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'histo")
                              +
                              +maybe'image ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'image" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'image
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'image")
                              +
                              +maybe'metadata ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'metadata" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'metadata
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'metadata")
                              +
                              +maybe'obsoleteOldStyleHistogram ::
                              +                                forall f s t a b .
                              +                                  (Lens.Labels.HasLens "maybe'obsoleteOldStyleHistogram" f s t a
                              +                                     b) =>
                              +                                  Lens.Family2.LensLike f s t a b
                              +maybe'obsoleteOldStyleHistogram
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'obsoleteOldStyleHistogram")
                              +
                              +maybe'pluginData ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'pluginData" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'pluginData
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'pluginData")
                              +
                              +maybe'simpleValue ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'simpleValue" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'simpleValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'simpleValue")
                              +
                              +maybe'tensor ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "maybe'tensor" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +maybe'tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'tensor")
                              +
                              +maybe'value ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'value" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'value")
                              +
                              +metadata ::
                              +         forall f s t a b . (Lens.Labels.HasLens "metadata" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +metadata
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "metadata")
                              +
                              +min ::
                              +    forall f s t a b . (Lens.Labels.HasLens "min" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +min
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "min")
                              +
                              +nodeName ::
                              +         forall f s t a b . (Lens.Labels.HasLens "nodeName" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +nodeName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "nodeName")
                              +
                              +num ::
                              +    forall f s t a b . (Lens.Labels.HasLens "num" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +num
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "num")
                              +
                              +numChannels ::
                              +            forall f s t a b . (Lens.Labels.HasLens "numChannels" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +numChannels
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "numChannels")
                              +
                              +obsoleteOldStyleHistogram ::
                              +                          forall f s t a b .
                              +                            (Lens.Labels.HasLens "obsoleteOldStyleHistogram" f s t a b) =>
                              +                            Lens.Family2.LensLike f s t a b
                              +obsoleteOldStyleHistogram
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "obsoleteOldStyleHistogram")
                              +
                              +pluginData ::
                              +           forall f s t a b . (Lens.Labels.HasLens "pluginData" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +pluginData
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "pluginData")
                              +
                              +pluginName ::
                              +           forall f s t a b . (Lens.Labels.HasLens "pluginName" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +pluginName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "pluginName")
                              +
                              +sampleRate ::
                              +           forall f s t a b . (Lens.Labels.HasLens "sampleRate" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +sampleRate
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "sampleRate")
                              +
                              +simpleValue ::
                              +            forall f s t a b . (Lens.Labels.HasLens "simpleValue" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +simpleValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "simpleValue")
                              +
                              +sum ::
                              +    forall f s t a b . (Lens.Labels.HasLens "sum" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +sum
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "sum")
                              +
                              +sumSquares ::
                              +           forall f s t a b . (Lens.Labels.HasLens "sumSquares" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +sumSquares
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "sumSquares")
                              +
                              +summaryDescription ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "summaryDescription" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +summaryDescription
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "summaryDescription")
                              +
                              +tag ::
                              +    forall f s t a b . (Lens.Labels.HasLens "tag" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +tag
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tag")
                              +
                              +tensor ::
                              +       forall f s t a b . (Lens.Labels.HasLens "tensor" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensor")
                              +
                              +typeHint ::
                              +         forall f s t a b . (Lens.Labels.HasLens "typeHint" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +typeHint
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "typeHint")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              +
                              +width ::
                              +      forall f s t a b . (Lens.Labels.HasLens "width" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +width
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "width")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Tensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Tensor.html new file mode 100644 index 0000000..3ba5c09 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Tensor.html @@ -0,0 +1,457 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/tensor.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.Tensor where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.ResourceHandle
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +
                              +data TensorProto = TensorProto{_TensorProto'dtype ::
                              +                               !Proto.Tensorflow.Core.Framework.Types.DataType,
                              +                               _TensorProto'tensorShape ::
                              +                               !(Prelude.Maybe
                              +                                   Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto),
                              +                               _TensorProto'versionNumber :: !Data.Int.Int32,
                              +                               _TensorProto'tensorContent :: !Data.ByteString.ByteString,
                              +                               _TensorProto'halfVal :: ![Data.Int.Int32],
                              +                               _TensorProto'floatVal :: ![Prelude.Float],
                              +                               _TensorProto'doubleVal :: ![Prelude.Double],
                              +                               _TensorProto'intVal :: ![Data.Int.Int32],
                              +                               _TensorProto'stringVal :: ![Data.ByteString.ByteString],
                              +                               _TensorProto'scomplexVal :: ![Prelude.Float],
                              +                               _TensorProto'int64Val :: ![Data.Int.Int64],
                              +                               _TensorProto'boolVal :: ![Prelude.Bool],
                              +                               _TensorProto'dcomplexVal :: ![Prelude.Double],
                              +                               _TensorProto'resourceHandleVal ::
                              +                               ![Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandleProto]}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dtype" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'dtype
                              +                 (\ x__ y__ -> x__{_TensorProto'dtype = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensorShape" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'tensorShape
                              +                 (\ x__ y__ -> x__{_TensorProto'tensorShape = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensorShape" f TensorProto TensorProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'tensorShape
                              +                 (\ x__ y__ -> x__{_TensorProto'tensorShape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "versionNumber" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'versionNumber
                              +                 (\ x__ y__ -> x__{_TensorProto'versionNumber = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.ByteString.ByteString,
                              +          b ~ Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensorContent" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'tensorContent
                              +                 (\ x__ y__ -> x__{_TensorProto'tensorContent = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int32], b ~ [Data.Int.Int32],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "halfVal" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'halfVal
                              +                 (\ x__ y__ -> x__{_TensorProto'halfVal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Float], b ~ [Prelude.Float],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "floatVal" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'floatVal
                              +                 (\ x__ y__ -> x__{_TensorProto'floatVal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Double], b ~ [Prelude.Double],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "doubleVal" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'doubleVal
                              +                 (\ x__ y__ -> x__{_TensorProto'doubleVal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int32], b ~ [Data.Int.Int32],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "intVal" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'intVal
                              +                 (\ x__ y__ -> x__{_TensorProto'intVal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.ByteString.ByteString],
                              +          b ~ [Data.ByteString.ByteString], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "stringVal" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'stringVal
                              +                 (\ x__ y__ -> x__{_TensorProto'stringVal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Float], b ~ [Prelude.Float],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "scomplexVal" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'scomplexVal
                              +                 (\ x__ y__ -> x__{_TensorProto'scomplexVal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "int64Val" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'int64Val
                              +                 (\ x__ y__ -> x__{_TensorProto'int64Val = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Bool], b ~ [Prelude.Bool],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "boolVal" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'boolVal
                              +                 (\ x__ y__ -> x__{_TensorProto'boolVal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Prelude.Double], b ~ [Prelude.Double],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dcomplexVal" f TensorProto TensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'dcomplexVal
                              +                 (\ x__ y__ -> x__{_TensorProto'dcomplexVal = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            [Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandleProto],
                              +          b ~
                              +            [Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandleProto],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "resourceHandleVal" f TensorProto TensorProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorProto'resourceHandleVal
                              +                 (\ x__ y__ -> x__{_TensorProto'resourceHandleVal = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default TensorProto where
                              +        def
                              +          = TensorProto{_TensorProto'dtype = Data.Default.Class.def,
                              +                        _TensorProto'tensorShape = Prelude.Nothing,
                              +                        _TensorProto'versionNumber = Data.ProtoLens.fieldDefault,
                              +                        _TensorProto'tensorContent = Data.ProtoLens.fieldDefault,
                              +                        _TensorProto'halfVal = [], _TensorProto'floatVal = [],
                              +                        _TensorProto'doubleVal = [], _TensorProto'intVal = [],
                              +                        _TensorProto'stringVal = [], _TensorProto'scomplexVal = [],
                              +                        _TensorProto'int64Val = [], _TensorProto'boolVal = [],
                              +                        _TensorProto'dcomplexVal = [], _TensorProto'resourceHandleVal = []}
                              +
                              +instance Data.ProtoLens.Message TensorProto where
                              +        descriptor
                              +          = let dtype__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dtype"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                tensorShape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor_shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.OptionalField maybe'tensorShape)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                versionNumber__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "version_number"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional versionNumber)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                tensorContent__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor_content"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional tensorContent)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                halfVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "half_val"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed halfVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                floatVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "float_val"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed floatVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                doubleVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "double_val"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed doubleVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                intVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "int_val"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed intVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                stringVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "string_val"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked stringVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                scomplexVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "scomplex_val"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed scomplexVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                int64Val__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "int64_val"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed int64Val)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                boolVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bool_val"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed boolVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                dcomplexVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dcomplex_val"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed dcomplexVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +                resourceHandleVal__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "resource_handle_val"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandleProto)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         resourceHandleVal)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TensorProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, tensorShape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, versionNumber__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, tensorContent__field_descriptor),
                              +                    (Data.ProtoLens.Tag 13, halfVal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, floatVal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, doubleVal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, intVal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, stringVal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, scomplexVal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, int64Val__field_descriptor),
                              +                    (Data.ProtoLens.Tag 11, boolVal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 12, dcomplexVal__field_descriptor),
                              +                    (Data.ProtoLens.Tag 14, resourceHandleVal__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("dtype", dtype__field_descriptor),
                              +                    ("tensor_shape", tensorShape__field_descriptor),
                              +                    ("version_number", versionNumber__field_descriptor),
                              +                    ("tensor_content", tensorContent__field_descriptor),
                              +                    ("half_val", halfVal__field_descriptor),
                              +                    ("float_val", floatVal__field_descriptor),
                              +                    ("double_val", doubleVal__field_descriptor),
                              +                    ("int_val", intVal__field_descriptor),
                              +                    ("string_val", stringVal__field_descriptor),
                              +                    ("scomplex_val", scomplexVal__field_descriptor),
                              +                    ("int64_val", int64Val__field_descriptor),
                              +                    ("bool_val", boolVal__field_descriptor),
                              +                    ("dcomplex_val", dcomplexVal__field_descriptor),
                              +                    ("resource_handle_val", resourceHandleVal__field_descriptor)])
                              +
                              +boolVal ::
                              +        forall f s t a b . (Lens.Labels.HasLens "boolVal" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +boolVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "boolVal")
                              +
                              +dcomplexVal ::
                              +            forall f s t a b . (Lens.Labels.HasLens "dcomplexVal" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +dcomplexVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "dcomplexVal")
                              +
                              +doubleVal ::
                              +          forall f s t a b . (Lens.Labels.HasLens "doubleVal" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +doubleVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "doubleVal")
                              +
                              +dtype ::
                              +      forall f s t a b . (Lens.Labels.HasLens "dtype" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +dtype
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "dtype")
                              +
                              +floatVal ::
                              +         forall f s t a b . (Lens.Labels.HasLens "floatVal" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +floatVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "floatVal")
                              +
                              +halfVal ::
                              +        forall f s t a b . (Lens.Labels.HasLens "halfVal" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +halfVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "halfVal")
                              +
                              +int64Val ::
                              +         forall f s t a b . (Lens.Labels.HasLens "int64Val" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +int64Val
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "int64Val")
                              +
                              +intVal ::
                              +       forall f s t a b . (Lens.Labels.HasLens "intVal" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +intVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "intVal")
                              +
                              +maybe'tensorShape ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'tensorShape" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'tensorShape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'tensorShape")
                              +
                              +resourceHandleVal ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "resourceHandleVal" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +resourceHandleVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "resourceHandleVal")
                              +
                              +scomplexVal ::
                              +            forall f s t a b . (Lens.Labels.HasLens "scomplexVal" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +scomplexVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "scomplexVal")
                              +
                              +stringVal ::
                              +          forall f s t a b . (Lens.Labels.HasLens "stringVal" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +stringVal
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "stringVal")
                              +
                              +tensorContent ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "tensorContent" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +tensorContent
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensorContent")
                              +
                              +tensorShape ::
                              +            forall f s t a b . (Lens.Labels.HasLens "tensorShape" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +tensorShape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensorShape")
                              +
                              +versionNumber ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "versionNumber" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +versionNumber
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "versionNumber")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorDescription.html new file mode 100644 index 0000000..5a600e5 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorDescription.html @@ -0,0 +1,202 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/tensor_description.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.TensorDescription where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified
                              +       Proto.Tensorflow.Core.Framework.AllocationDescription
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +
                              +data TensorDescription = TensorDescription{_TensorDescription'dtype
                              +                                           :: !Proto.Tensorflow.Core.Framework.Types.DataType,
                              +                                           _TensorDescription'shape ::
                              +                                           !(Prelude.Maybe
                              +                                               Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto),
                              +                                           _TensorDescription'allocationDescription ::
                              +                                           !(Prelude.Maybe
                              +                                               Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription)}
                              +                       deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dtype" f TensorDescription TensorDescription a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorDescription'dtype
                              +                 (\ x__ y__ -> x__{_TensorDescription'dtype = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shape" f TensorDescription TensorDescription a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorDescription'shape
                              +                 (\ x__ y__ -> x__{_TensorDescription'shape = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'shape" f TensorDescription
                              +           TensorDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorDescription'shape
                              +                 (\ x__ y__ -> x__{_TensorDescription'shape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription,
                              +          b ~
                              +            Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocationDescription" f TensorDescription
                              +           TensorDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _TensorDescription'allocationDescription
                              +                 (\ x__ y__ -> x__{_TensorDescription'allocationDescription = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'allocationDescription" f
                              +           TensorDescription
                              +           TensorDescription
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _TensorDescription'allocationDescription
                              +                 (\ x__ y__ -> x__{_TensorDescription'allocationDescription = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default TensorDescription where
                              +        def
                              +          = TensorDescription{_TensorDescription'dtype =
                              +                                Data.Default.Class.def,
                              +                              _TensorDescription'shape = Prelude.Nothing,
                              +                              _TensorDescription'allocationDescription = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message TensorDescription where
                              +        descriptor
                              +          = let dtype__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dtype"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorDescription
                              +                shape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.OptionalField maybe'shape)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorDescription
                              +                allocationDescription__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocation_description"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription)
                              +                      (Data.ProtoLens.OptionalField maybe'allocationDescription)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorDescription
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TensorDescription")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, shape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, allocationDescription__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("dtype", dtype__field_descriptor),
                              +                    ("shape", shape__field_descriptor),
                              +                    ("allocation_description",
                              +                     allocationDescription__field_descriptor)])
                              +
                              +allocationDescription ::
                              +                      forall f s t a b .
                              +                        (Lens.Labels.HasLens "allocationDescription" f s t a b) =>
                              +                        Lens.Family2.LensLike f s t a b
                              +allocationDescription
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "allocationDescription")
                              +
                              +dtype ::
                              +      forall f s t a b . (Lens.Labels.HasLens "dtype" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +dtype
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "dtype")
                              +
                              +maybe'allocationDescription ::
                              +                            forall f s t a b .
                              +                              (Lens.Labels.HasLens "maybe'allocationDescription" f s t a b) =>
                              +                              Lens.Family2.LensLike f s t a b
                              +maybe'allocationDescription
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'allocationDescription")
                              +
                              +maybe'shape ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'shape" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'shape")
                              +
                              +shape ::
                              +      forall f s t a b . (Lens.Labels.HasLens "shape" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "shape")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorShape.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorShape.html new file mode 100644 index 0000000..3b65b1d --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorShape.html @@ -0,0 +1,171 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/tensor_shape.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.TensorShape where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data TensorShapeProto = TensorShapeProto{_TensorShapeProto'dim ::
                              +                                         ![TensorShapeProto'Dim],
                              +                                         _TensorShapeProto'unknownRank :: !Prelude.Bool}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [TensorShapeProto'Dim], b ~ [TensorShapeProto'Dim],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dim" f TensorShapeProto TensorShapeProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorShapeProto'dim
                              +                 (\ x__ y__ -> x__{_TensorShapeProto'dim = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "unknownRank" f TensorShapeProto
                              +           TensorShapeProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorShapeProto'unknownRank
                              +                 (\ x__ y__ -> x__{_TensorShapeProto'unknownRank = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default TensorShapeProto where
                              +        def
                              +          = TensorShapeProto{_TensorShapeProto'dim = [],
                              +                             _TensorShapeProto'unknownRank = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message TensorShapeProto where
                              +        descriptor
                              +          = let dim__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dim"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor TensorShapeProto'Dim)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked dim)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorShapeProto
                              +                unknownRank__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "unknown_rank"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional unknownRank)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorShapeProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TensorShapeProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 2, dim__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, unknownRank__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("dim", dim__field_descriptor),
                              +                    ("unknown_rank", unknownRank__field_descriptor)])
                              +
                              +data TensorShapeProto'Dim = TensorShapeProto'Dim{_TensorShapeProto'Dim'size
                              +                                                 :: !Data.Int.Int64,
                              +                                                 _TensorShapeProto'Dim'name :: !Data.Text.Text}
                              +                          deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "size" f TensorShapeProto'Dim
                              +           TensorShapeProto'Dim
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorShapeProto'Dim'size
                              +                 (\ x__ y__ -> x__{_TensorShapeProto'Dim'size = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f TensorShapeProto'Dim
                              +           TensorShapeProto'Dim
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorShapeProto'Dim'name
                              +                 (\ x__ y__ -> x__{_TensorShapeProto'Dim'name = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default TensorShapeProto'Dim where
                              +        def
                              +          = TensorShapeProto'Dim{_TensorShapeProto'Dim'size =
                              +                                   Data.ProtoLens.fieldDefault,
                              +                                 _TensorShapeProto'Dim'name = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message TensorShapeProto'Dim where
                              +        descriptor
                              +          = let size__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional size)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorShapeProto'Dim
                              +                name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorShapeProto'Dim
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TensorShapeProto.Dim")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, size__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, name__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("size", size__field_descriptor),
                              +                    ("name", name__field_descriptor)])
                              +
                              +dim ::
                              +    forall f s t a b . (Lens.Labels.HasLens "dim" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +dim
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "dim")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +size ::
                              +     forall f s t a b . (Lens.Labels.HasLens "size" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +size
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "size")
                              +
                              +unknownRank ::
                              +            forall f s t a b . (Lens.Labels.HasLens "unknownRank" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +unknownRank
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "unknownRank")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorSlice.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorSlice.html new file mode 100644 index 0000000..a23526e --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.TensorSlice.html @@ -0,0 +1,203 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/tensor_slice.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.TensorSlice where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data TensorSliceProto = TensorSliceProto{_TensorSliceProto'extent
                              +                                         :: ![TensorSliceProto'Extent]}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [TensorSliceProto'Extent],
                              +          b ~ [TensorSliceProto'Extent], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "extent" f TensorSliceProto TensorSliceProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorSliceProto'extent
                              +                 (\ x__ y__ -> x__{_TensorSliceProto'extent = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default TensorSliceProto where
                              +        def = TensorSliceProto{_TensorSliceProto'extent = []}
                              +
                              +instance Data.ProtoLens.Message TensorSliceProto where
                              +        descriptor
                              +          = let extent__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "extent"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor TensorSliceProto'Extent)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked extent)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorSliceProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TensorSliceProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, extent__field_descriptor)])
                              +                (Data.Map.fromList [("extent", extent__field_descriptor)])
                              +
                              +data TensorSliceProto'Extent = TensorSliceProto'Extent{_TensorSliceProto'Extent'start
                              +                                                       :: !Data.Int.Int64,
                              +                                                       _TensorSliceProto'Extent'hasLength ::
                              +                                                       !(Prelude.Maybe
                              +                                                           TensorSliceProto'Extent'HasLength)}
                              +                             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data TensorSliceProto'Extent'HasLength = TensorSliceProto'Extent'Length !Data.Int.Int64
                              +                                       deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "start" f TensorSliceProto'Extent
                              +           TensorSliceProto'Extent
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorSliceProto'Extent'start
                              +                 (\ x__ y__ -> x__{_TensorSliceProto'Extent'start = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe TensorSliceProto'Extent'HasLength,
                              +          b ~ Prelude.Maybe TensorSliceProto'Extent'HasLength,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'hasLength" f TensorSliceProto'Extent
                              +           TensorSliceProto'Extent
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorSliceProto'Extent'hasLength
                              +                 (\ x__ y__ -> x__{_TensorSliceProto'Extent'hasLength = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Data.Int.Int64,
                              +          b ~ Prelude.Maybe Data.Int.Int64, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'length" f TensorSliceProto'Extent
                              +           TensorSliceProto'Extent
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorSliceProto'Extent'hasLength
                              +                 (\ x__ y__ -> x__{_TensorSliceProto'Extent'hasLength = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just
                              +                          (TensorSliceProto'Extent'Length x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap TensorSliceProto'Extent'Length y__))
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "length" f TensorSliceProto'Extent
                              +           TensorSliceProto'Extent
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorSliceProto'Extent'hasLength
                              +                 (\ x__ y__ -> x__{_TensorSliceProto'Extent'hasLength = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just
                              +                             (TensorSliceProto'Extent'Length x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap TensorSliceProto'Extent'Length y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance Data.Default.Class.Default TensorSliceProto'Extent where
                              +        def
                              +          = TensorSliceProto'Extent{_TensorSliceProto'Extent'start =
                              +                                      Data.ProtoLens.fieldDefault,
                              +                                    _TensorSliceProto'Extent'hasLength = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message TensorSliceProto'Extent where
                              +        descriptor
                              +          = let start__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "start"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional start)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorSliceProto'Extent
                              +                length__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "length"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.OptionalField maybe'length)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorSliceProto'Extent
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TensorSliceProto.Extent")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, start__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, length__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("start", start__field_descriptor),
                              +                    ("length", length__field_descriptor)])
                              +
                              +extent ::
                              +       forall f s t a b . (Lens.Labels.HasLens "extent" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +extent
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "extent")
                              +
                              +length ::
                              +       forall f s t a b . (Lens.Labels.HasLens "length" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +length
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "length")
                              +
                              +maybe'hasLength ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'hasLength" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'hasLength
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'hasLength")
                              +
                              +maybe'length ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "maybe'length" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +maybe'length
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'length")
                              +
                              +start ::
                              +      forall f s t a b . (Lens.Labels.HasLens "start" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +start
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "start")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Types.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Types.html new file mode 100644 index 0000000..f1dc4ca --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Types.html @@ -0,0 +1,346 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/types.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.Types where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data DataType = DT_INVALID
                              +              | DT_FLOAT
                              +              | DT_DOUBLE
                              +              | DT_INT32
                              +              | DT_UINT8
                              +              | DT_INT16
                              +              | DT_INT8
                              +              | DT_STRING
                              +              | DT_COMPLEX64
                              +              | DT_INT64
                              +              | DT_BOOL
                              +              | DT_QINT8
                              +              | DT_QUINT8
                              +              | DT_QINT32
                              +              | DT_BFLOAT16
                              +              | DT_QINT16
                              +              | DT_QUINT16
                              +              | DT_UINT16
                              +              | DT_COMPLEX128
                              +              | DT_HALF
                              +              | DT_RESOURCE
                              +              | DT_FLOAT_REF
                              +              | DT_DOUBLE_REF
                              +              | DT_INT32_REF
                              +              | DT_UINT8_REF
                              +              | DT_INT16_REF
                              +              | DT_INT8_REF
                              +              | DT_STRING_REF
                              +              | DT_COMPLEX64_REF
                              +              | DT_INT64_REF
                              +              | DT_BOOL_REF
                              +              | DT_QINT8_REF
                              +              | DT_QUINT8_REF
                              +              | DT_QINT32_REF
                              +              | DT_BFLOAT16_REF
                              +              | DT_QINT16_REF
                              +              | DT_QUINT16_REF
                              +              | DT_UINT16_REF
                              +              | DT_COMPLEX128_REF
                              +              | DT_HALF_REF
                              +              | DT_RESOURCE_REF
                              +              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default DataType where
                              +        def = DT_INVALID
                              +
                              +instance Data.ProtoLens.FieldDefault DataType where
                              +        fieldDefault = DT_INVALID
                              +
                              +instance Data.ProtoLens.MessageEnum DataType where
                              +        maybeToEnum 0 = Prelude.Just DT_INVALID
                              +        maybeToEnum 1 = Prelude.Just DT_FLOAT
                              +        maybeToEnum 2 = Prelude.Just DT_DOUBLE
                              +        maybeToEnum 3 = Prelude.Just DT_INT32
                              +        maybeToEnum 4 = Prelude.Just DT_UINT8
                              +        maybeToEnum 5 = Prelude.Just DT_INT16
                              +        maybeToEnum 6 = Prelude.Just DT_INT8
                              +        maybeToEnum 7 = Prelude.Just DT_STRING
                              +        maybeToEnum 8 = Prelude.Just DT_COMPLEX64
                              +        maybeToEnum 9 = Prelude.Just DT_INT64
                              +        maybeToEnum 10 = Prelude.Just DT_BOOL
                              +        maybeToEnum 11 = Prelude.Just DT_QINT8
                              +        maybeToEnum 12 = Prelude.Just DT_QUINT8
                              +        maybeToEnum 13 = Prelude.Just DT_QINT32
                              +        maybeToEnum 14 = Prelude.Just DT_BFLOAT16
                              +        maybeToEnum 15 = Prelude.Just DT_QINT16
                              +        maybeToEnum 16 = Prelude.Just DT_QUINT16
                              +        maybeToEnum 17 = Prelude.Just DT_UINT16
                              +        maybeToEnum 18 = Prelude.Just DT_COMPLEX128
                              +        maybeToEnum 19 = Prelude.Just DT_HALF
                              +        maybeToEnum 20 = Prelude.Just DT_RESOURCE
                              +        maybeToEnum 101 = Prelude.Just DT_FLOAT_REF
                              +        maybeToEnum 102 = Prelude.Just DT_DOUBLE_REF
                              +        maybeToEnum 103 = Prelude.Just DT_INT32_REF
                              +        maybeToEnum 104 = Prelude.Just DT_UINT8_REF
                              +        maybeToEnum 105 = Prelude.Just DT_INT16_REF
                              +        maybeToEnum 106 = Prelude.Just DT_INT8_REF
                              +        maybeToEnum 107 = Prelude.Just DT_STRING_REF
                              +        maybeToEnum 108 = Prelude.Just DT_COMPLEX64_REF
                              +        maybeToEnum 109 = Prelude.Just DT_INT64_REF
                              +        maybeToEnum 110 = Prelude.Just DT_BOOL_REF
                              +        maybeToEnum 111 = Prelude.Just DT_QINT8_REF
                              +        maybeToEnum 112 = Prelude.Just DT_QUINT8_REF
                              +        maybeToEnum 113 = Prelude.Just DT_QINT32_REF
                              +        maybeToEnum 114 = Prelude.Just DT_BFLOAT16_REF
                              +        maybeToEnum 115 = Prelude.Just DT_QINT16_REF
                              +        maybeToEnum 116 = Prelude.Just DT_QUINT16_REF
                              +        maybeToEnum 117 = Prelude.Just DT_UINT16_REF
                              +        maybeToEnum 118 = Prelude.Just DT_COMPLEX128_REF
                              +        maybeToEnum 119 = Prelude.Just DT_HALF_REF
                              +        maybeToEnum 120 = Prelude.Just DT_RESOURCE_REF
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum DT_INVALID = "DT_INVALID"
                              +        showEnum DT_FLOAT = "DT_FLOAT"
                              +        showEnum DT_DOUBLE = "DT_DOUBLE"
                              +        showEnum DT_INT32 = "DT_INT32"
                              +        showEnum DT_UINT8 = "DT_UINT8"
                              +        showEnum DT_INT16 = "DT_INT16"
                              +        showEnum DT_INT8 = "DT_INT8"
                              +        showEnum DT_STRING = "DT_STRING"
                              +        showEnum DT_COMPLEX64 = "DT_COMPLEX64"
                              +        showEnum DT_INT64 = "DT_INT64"
                              +        showEnum DT_BOOL = "DT_BOOL"
                              +        showEnum DT_QINT8 = "DT_QINT8"
                              +        showEnum DT_QUINT8 = "DT_QUINT8"
                              +        showEnum DT_QINT32 = "DT_QINT32"
                              +        showEnum DT_BFLOAT16 = "DT_BFLOAT16"
                              +        showEnum DT_QINT16 = "DT_QINT16"
                              +        showEnum DT_QUINT16 = "DT_QUINT16"
                              +        showEnum DT_UINT16 = "DT_UINT16"
                              +        showEnum DT_COMPLEX128 = "DT_COMPLEX128"
                              +        showEnum DT_HALF = "DT_HALF"
                              +        showEnum DT_RESOURCE = "DT_RESOURCE"
                              +        showEnum DT_FLOAT_REF = "DT_FLOAT_REF"
                              +        showEnum DT_DOUBLE_REF = "DT_DOUBLE_REF"
                              +        showEnum DT_INT32_REF = "DT_INT32_REF"
                              +        showEnum DT_UINT8_REF = "DT_UINT8_REF"
                              +        showEnum DT_INT16_REF = "DT_INT16_REF"
                              +        showEnum DT_INT8_REF = "DT_INT8_REF"
                              +        showEnum DT_STRING_REF = "DT_STRING_REF"
                              +        showEnum DT_COMPLEX64_REF = "DT_COMPLEX64_REF"
                              +        showEnum DT_INT64_REF = "DT_INT64_REF"
                              +        showEnum DT_BOOL_REF = "DT_BOOL_REF"
                              +        showEnum DT_QINT8_REF = "DT_QINT8_REF"
                              +        showEnum DT_QUINT8_REF = "DT_QUINT8_REF"
                              +        showEnum DT_QINT32_REF = "DT_QINT32_REF"
                              +        showEnum DT_BFLOAT16_REF = "DT_BFLOAT16_REF"
                              +        showEnum DT_QINT16_REF = "DT_QINT16_REF"
                              +        showEnum DT_QUINT16_REF = "DT_QUINT16_REF"
                              +        showEnum DT_UINT16_REF = "DT_UINT16_REF"
                              +        showEnum DT_COMPLEX128_REF = "DT_COMPLEX128_REF"
                              +        showEnum DT_HALF_REF = "DT_HALF_REF"
                              +        showEnum DT_RESOURCE_REF = "DT_RESOURCE_REF"
                              +        readEnum "DT_INVALID" = Prelude.Just DT_INVALID
                              +        readEnum "DT_FLOAT" = Prelude.Just DT_FLOAT
                              +        readEnum "DT_DOUBLE" = Prelude.Just DT_DOUBLE
                              +        readEnum "DT_INT32" = Prelude.Just DT_INT32
                              +        readEnum "DT_UINT8" = Prelude.Just DT_UINT8
                              +        readEnum "DT_INT16" = Prelude.Just DT_INT16
                              +        readEnum "DT_INT8" = Prelude.Just DT_INT8
                              +        readEnum "DT_STRING" = Prelude.Just DT_STRING
                              +        readEnum "DT_COMPLEX64" = Prelude.Just DT_COMPLEX64
                              +        readEnum "DT_INT64" = Prelude.Just DT_INT64
                              +        readEnum "DT_BOOL" = Prelude.Just DT_BOOL
                              +        readEnum "DT_QINT8" = Prelude.Just DT_QINT8
                              +        readEnum "DT_QUINT8" = Prelude.Just DT_QUINT8
                              +        readEnum "DT_QINT32" = Prelude.Just DT_QINT32
                              +        readEnum "DT_BFLOAT16" = Prelude.Just DT_BFLOAT16
                              +        readEnum "DT_QINT16" = Prelude.Just DT_QINT16
                              +        readEnum "DT_QUINT16" = Prelude.Just DT_QUINT16
                              +        readEnum "DT_UINT16" = Prelude.Just DT_UINT16
                              +        readEnum "DT_COMPLEX128" = Prelude.Just DT_COMPLEX128
                              +        readEnum "DT_HALF" = Prelude.Just DT_HALF
                              +        readEnum "DT_RESOURCE" = Prelude.Just DT_RESOURCE
                              +        readEnum "DT_FLOAT_REF" = Prelude.Just DT_FLOAT_REF
                              +        readEnum "DT_DOUBLE_REF" = Prelude.Just DT_DOUBLE_REF
                              +        readEnum "DT_INT32_REF" = Prelude.Just DT_INT32_REF
                              +        readEnum "DT_UINT8_REF" = Prelude.Just DT_UINT8_REF
                              +        readEnum "DT_INT16_REF" = Prelude.Just DT_INT16_REF
                              +        readEnum "DT_INT8_REF" = Prelude.Just DT_INT8_REF
                              +        readEnum "DT_STRING_REF" = Prelude.Just DT_STRING_REF
                              +        readEnum "DT_COMPLEX64_REF" = Prelude.Just DT_COMPLEX64_REF
                              +        readEnum "DT_INT64_REF" = Prelude.Just DT_INT64_REF
                              +        readEnum "DT_BOOL_REF" = Prelude.Just DT_BOOL_REF
                              +        readEnum "DT_QINT8_REF" = Prelude.Just DT_QINT8_REF
                              +        readEnum "DT_QUINT8_REF" = Prelude.Just DT_QUINT8_REF
                              +        readEnum "DT_QINT32_REF" = Prelude.Just DT_QINT32_REF
                              +        readEnum "DT_BFLOAT16_REF" = Prelude.Just DT_BFLOAT16_REF
                              +        readEnum "DT_QINT16_REF" = Prelude.Just DT_QINT16_REF
                              +        readEnum "DT_QUINT16_REF" = Prelude.Just DT_QUINT16_REF
                              +        readEnum "DT_UINT16_REF" = Prelude.Just DT_UINT16_REF
                              +        readEnum "DT_COMPLEX128_REF" = Prelude.Just DT_COMPLEX128_REF
                              +        readEnum "DT_HALF_REF" = Prelude.Just DT_HALF_REF
                              +        readEnum "DT_RESOURCE_REF" = Prelude.Just DT_RESOURCE_REF
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum DataType where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum DataType: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum DT_INVALID = 0
                              +        fromEnum DT_FLOAT = 1
                              +        fromEnum DT_DOUBLE = 2
                              +        fromEnum DT_INT32 = 3
                              +        fromEnum DT_UINT8 = 4
                              +        fromEnum DT_INT16 = 5
                              +        fromEnum DT_INT8 = 6
                              +        fromEnum DT_STRING = 7
                              +        fromEnum DT_COMPLEX64 = 8
                              +        fromEnum DT_INT64 = 9
                              +        fromEnum DT_BOOL = 10
                              +        fromEnum DT_QINT8 = 11
                              +        fromEnum DT_QUINT8 = 12
                              +        fromEnum DT_QINT32 = 13
                              +        fromEnum DT_BFLOAT16 = 14
                              +        fromEnum DT_QINT16 = 15
                              +        fromEnum DT_QUINT16 = 16
                              +        fromEnum DT_UINT16 = 17
                              +        fromEnum DT_COMPLEX128 = 18
                              +        fromEnum DT_HALF = 19
                              +        fromEnum DT_RESOURCE = 20
                              +        fromEnum DT_FLOAT_REF = 101
                              +        fromEnum DT_DOUBLE_REF = 102
                              +        fromEnum DT_INT32_REF = 103
                              +        fromEnum DT_UINT8_REF = 104
                              +        fromEnum DT_INT16_REF = 105
                              +        fromEnum DT_INT8_REF = 106
                              +        fromEnum DT_STRING_REF = 107
                              +        fromEnum DT_COMPLEX64_REF = 108
                              +        fromEnum DT_INT64_REF = 109
                              +        fromEnum DT_BOOL_REF = 110
                              +        fromEnum DT_QINT8_REF = 111
                              +        fromEnum DT_QUINT8_REF = 112
                              +        fromEnum DT_QINT32_REF = 113
                              +        fromEnum DT_BFLOAT16_REF = 114
                              +        fromEnum DT_QINT16_REF = 115
                              +        fromEnum DT_QUINT16_REF = 116
                              +        fromEnum DT_UINT16_REF = 117
                              +        fromEnum DT_COMPLEX128_REF = 118
                              +        fromEnum DT_HALF_REF = 119
                              +        fromEnum DT_RESOURCE_REF = 120
                              +        succ DT_RESOURCE_REF
                              +          = Prelude.error
                              +              "DataType.succ: bad argument DT_RESOURCE_REF. This value would be out of bounds."
                              +        succ DT_INVALID = DT_FLOAT
                              +        succ DT_FLOAT = DT_DOUBLE
                              +        succ DT_DOUBLE = DT_INT32
                              +        succ DT_INT32 = DT_UINT8
                              +        succ DT_UINT8 = DT_INT16
                              +        succ DT_INT16 = DT_INT8
                              +        succ DT_INT8 = DT_STRING
                              +        succ DT_STRING = DT_COMPLEX64
                              +        succ DT_COMPLEX64 = DT_INT64
                              +        succ DT_INT64 = DT_BOOL
                              +        succ DT_BOOL = DT_QINT8
                              +        succ DT_QINT8 = DT_QUINT8
                              +        succ DT_QUINT8 = DT_QINT32
                              +        succ DT_QINT32 = DT_BFLOAT16
                              +        succ DT_BFLOAT16 = DT_QINT16
                              +        succ DT_QINT16 = DT_QUINT16
                              +        succ DT_QUINT16 = DT_UINT16
                              +        succ DT_UINT16 = DT_COMPLEX128
                              +        succ DT_COMPLEX128 = DT_HALF
                              +        succ DT_HALF = DT_RESOURCE
                              +        succ DT_RESOURCE = DT_FLOAT_REF
                              +        succ DT_FLOAT_REF = DT_DOUBLE_REF
                              +        succ DT_DOUBLE_REF = DT_INT32_REF
                              +        succ DT_INT32_REF = DT_UINT8_REF
                              +        succ DT_UINT8_REF = DT_INT16_REF
                              +        succ DT_INT16_REF = DT_INT8_REF
                              +        succ DT_INT8_REF = DT_STRING_REF
                              +        succ DT_STRING_REF = DT_COMPLEX64_REF
                              +        succ DT_COMPLEX64_REF = DT_INT64_REF
                              +        succ DT_INT64_REF = DT_BOOL_REF
                              +        succ DT_BOOL_REF = DT_QINT8_REF
                              +        succ DT_QINT8_REF = DT_QUINT8_REF
                              +        succ DT_QUINT8_REF = DT_QINT32_REF
                              +        succ DT_QINT32_REF = DT_BFLOAT16_REF
                              +        succ DT_BFLOAT16_REF = DT_QINT16_REF
                              +        succ DT_QINT16_REF = DT_QUINT16_REF
                              +        succ DT_QUINT16_REF = DT_UINT16_REF
                              +        succ DT_UINT16_REF = DT_COMPLEX128_REF
                              +        succ DT_COMPLEX128_REF = DT_HALF_REF
                              +        succ DT_HALF_REF = DT_RESOURCE_REF
                              +        pred DT_INVALID
                              +          = Prelude.error
                              +              "DataType.pred: bad argument DT_INVALID. This value would be out of bounds."
                              +        pred DT_FLOAT = DT_INVALID
                              +        pred DT_DOUBLE = DT_FLOAT
                              +        pred DT_INT32 = DT_DOUBLE
                              +        pred DT_UINT8 = DT_INT32
                              +        pred DT_INT16 = DT_UINT8
                              +        pred DT_INT8 = DT_INT16
                              +        pred DT_STRING = DT_INT8
                              +        pred DT_COMPLEX64 = DT_STRING
                              +        pred DT_INT64 = DT_COMPLEX64
                              +        pred DT_BOOL = DT_INT64
                              +        pred DT_QINT8 = DT_BOOL
                              +        pred DT_QUINT8 = DT_QINT8
                              +        pred DT_QINT32 = DT_QUINT8
                              +        pred DT_BFLOAT16 = DT_QINT32
                              +        pred DT_QINT16 = DT_BFLOAT16
                              +        pred DT_QUINT16 = DT_QINT16
                              +        pred DT_UINT16 = DT_QUINT16
                              +        pred DT_COMPLEX128 = DT_UINT16
                              +        pred DT_HALF = DT_COMPLEX128
                              +        pred DT_RESOURCE = DT_HALF
                              +        pred DT_FLOAT_REF = DT_RESOURCE
                              +        pred DT_DOUBLE_REF = DT_FLOAT_REF
                              +        pred DT_INT32_REF = DT_DOUBLE_REF
                              +        pred DT_UINT8_REF = DT_INT32_REF
                              +        pred DT_INT16_REF = DT_UINT8_REF
                              +        pred DT_INT8_REF = DT_INT16_REF
                              +        pred DT_STRING_REF = DT_INT8_REF
                              +        pred DT_COMPLEX64_REF = DT_STRING_REF
                              +        pred DT_INT64_REF = DT_COMPLEX64_REF
                              +        pred DT_BOOL_REF = DT_INT64_REF
                              +        pred DT_QINT8_REF = DT_BOOL_REF
                              +        pred DT_QUINT8_REF = DT_QINT8_REF
                              +        pred DT_QINT32_REF = DT_QUINT8_REF
                              +        pred DT_BFLOAT16_REF = DT_QINT32_REF
                              +        pred DT_QINT16_REF = DT_BFLOAT16_REF
                              +        pred DT_QUINT16_REF = DT_QINT16_REF
                              +        pred DT_UINT16_REF = DT_QUINT16_REF
                              +        pred DT_COMPLEX128_REF = DT_UINT16_REF
                              +        pred DT_HALF_REF = DT_COMPLEX128_REF
                              +        pred DT_RESOURCE_REF = DT_HALF_REF
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded DataType where
                              +        minBound = DT_INVALID
                              +        maxBound = DT_RESOURCE_REF
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Variable.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Variable.html new file mode 100644 index 0000000..df1d634 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Variable.html @@ -0,0 +1,332 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/variable.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.Variable where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data SaveSliceInfoDef = SaveSliceInfoDef{_SaveSliceInfoDef'fullName
                              +                                         :: !Data.Text.Text,
                              +                                         _SaveSliceInfoDef'fullShape :: ![Data.Int.Int64],
                              +                                         _SaveSliceInfoDef'varOffset :: ![Data.Int.Int64],
                              +                                         _SaveSliceInfoDef'varShape :: ![Data.Int.Int64]}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "fullName" f SaveSliceInfoDef SaveSliceInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaveSliceInfoDef'fullName
                              +                 (\ x__ y__ -> x__{_SaveSliceInfoDef'fullName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "fullShape" f SaveSliceInfoDef SaveSliceInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaveSliceInfoDef'fullShape
                              +                 (\ x__ y__ -> x__{_SaveSliceInfoDef'fullShape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "varOffset" f SaveSliceInfoDef SaveSliceInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaveSliceInfoDef'varOffset
                              +                 (\ x__ y__ -> x__{_SaveSliceInfoDef'varOffset = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "varShape" f SaveSliceInfoDef SaveSliceInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaveSliceInfoDef'varShape
                              +                 (\ x__ y__ -> x__{_SaveSliceInfoDef'varShape = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SaveSliceInfoDef where
                              +        def
                              +          = SaveSliceInfoDef{_SaveSliceInfoDef'fullName =
                              +                               Data.ProtoLens.fieldDefault,
                              +                             _SaveSliceInfoDef'fullShape = [], _SaveSliceInfoDef'varOffset = [],
                              +                             _SaveSliceInfoDef'varShape = []}
                              +
                              +instance Data.ProtoLens.Message SaveSliceInfoDef where
                              +        descriptor
                              +          = let fullName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "full_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional fullName)
                              +                      :: Data.ProtoLens.FieldDescriptor SaveSliceInfoDef
                              +                fullShape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "full_shape"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed fullShape)
                              +                      :: Data.ProtoLens.FieldDescriptor SaveSliceInfoDef
                              +                varOffset__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "var_offset"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed varOffset)
                              +                      :: Data.ProtoLens.FieldDescriptor SaveSliceInfoDef
                              +                varShape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "var_shape"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed varShape)
                              +                      :: Data.ProtoLens.FieldDescriptor SaveSliceInfoDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SaveSliceInfoDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, fullName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, fullShape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, varOffset__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, varShape__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("full_name", fullName__field_descriptor),
                              +                    ("full_shape", fullShape__field_descriptor),
                              +                    ("var_offset", varOffset__field_descriptor),
                              +                    ("var_shape", varShape__field_descriptor)])
                              +
                              +data VariableDef = VariableDef{_VariableDef'variableName ::
                              +                               !Data.Text.Text,
                              +                               _VariableDef'initializerName :: !Data.Text.Text,
                              +                               _VariableDef'snapshotName :: !Data.Text.Text,
                              +                               _VariableDef'saveSliceInfoDef :: !(Prelude.Maybe SaveSliceInfoDef),
                              +                               _VariableDef'isResource :: !Prelude.Bool}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "variableName" f VariableDef VariableDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VariableDef'variableName
                              +                 (\ x__ y__ -> x__{_VariableDef'variableName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "initializerName" f VariableDef VariableDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VariableDef'initializerName
                              +                 (\ x__ y__ -> x__{_VariableDef'initializerName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "snapshotName" f VariableDef VariableDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VariableDef'snapshotName
                              +                 (\ x__ y__ -> x__{_VariableDef'snapshotName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ SaveSliceInfoDef, b ~ SaveSliceInfoDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "saveSliceInfoDef" f VariableDef VariableDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VariableDef'saveSliceInfoDef
                              +                 (\ x__ y__ -> x__{_VariableDef'saveSliceInfoDef = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe SaveSliceInfoDef,
                              +          b ~ Prelude.Maybe SaveSliceInfoDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'saveSliceInfoDef" f VariableDef
                              +           VariableDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VariableDef'saveSliceInfoDef
                              +                 (\ x__ y__ -> x__{_VariableDef'saveSliceInfoDef = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "isResource" f VariableDef VariableDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VariableDef'isResource
                              +                 (\ x__ y__ -> x__{_VariableDef'isResource = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default VariableDef where
                              +        def
                              +          = VariableDef{_VariableDef'variableName =
                              +                          Data.ProtoLens.fieldDefault,
                              +                        _VariableDef'initializerName = Data.ProtoLens.fieldDefault,
                              +                        _VariableDef'snapshotName = Data.ProtoLens.fieldDefault,
                              +                        _VariableDef'saveSliceInfoDef = Prelude.Nothing,
                              +                        _VariableDef'isResource = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message VariableDef where
                              +        descriptor
                              +          = let variableName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "variable_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional variableName)
                              +                      :: Data.ProtoLens.FieldDescriptor VariableDef
                              +                initializerName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "initializer_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional initializerName)
                              +                      :: Data.ProtoLens.FieldDescriptor VariableDef
                              +                snapshotName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "snapshot_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional snapshotName)
                              +                      :: Data.ProtoLens.FieldDescriptor VariableDef
                              +                saveSliceInfoDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "save_slice_info_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SaveSliceInfoDef)
                              +                      (Data.ProtoLens.OptionalField maybe'saveSliceInfoDef)
                              +                      :: Data.ProtoLens.FieldDescriptor VariableDef
                              +                isResource__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "is_resource"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isResource)
                              +                      :: Data.ProtoLens.FieldDescriptor VariableDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.VariableDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, variableName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, initializerName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, snapshotName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, saveSliceInfoDef__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, isResource__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("variable_name", variableName__field_descriptor),
                              +                    ("initializer_name", initializerName__field_descriptor),
                              +                    ("snapshot_name", snapshotName__field_descriptor),
                              +                    ("save_slice_info_def", saveSliceInfoDef__field_descriptor),
                              +                    ("is_resource", isResource__field_descriptor)])
                              +
                              +fullName ::
                              +         forall f s t a b . (Lens.Labels.HasLens "fullName" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +fullName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "fullName")
                              +
                              +fullShape ::
                              +          forall f s t a b . (Lens.Labels.HasLens "fullShape" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +fullShape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "fullShape")
                              +
                              +initializerName ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "initializerName" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +initializerName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "initializerName")
                              +
                              +isResource ::
                              +           forall f s t a b . (Lens.Labels.HasLens "isResource" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +isResource
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "isResource")
                              +
                              +maybe'saveSliceInfoDef ::
                              +                       forall f s t a b .
                              +                         (Lens.Labels.HasLens "maybe'saveSliceInfoDef" f s t a b) =>
                              +                         Lens.Family2.LensLike f s t a b
                              +maybe'saveSliceInfoDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'saveSliceInfoDef")
                              +
                              +saveSliceInfoDef ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "saveSliceInfoDef" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +saveSliceInfoDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "saveSliceInfoDef")
                              +
                              +snapshotName ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "snapshotName" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +snapshotName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "snapshotName")
                              +
                              +varOffset ::
                              +          forall f s t a b . (Lens.Labels.HasLens "varOffset" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +varOffset
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "varOffset")
                              +
                              +varShape ::
                              +         forall f s t a b . (Lens.Labels.HasLens "varShape" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +varShape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "varShape")
                              +
                              +variableName ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "variableName" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +variableName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "variableName")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Versions.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Versions.html new file mode 100644 index 0000000..3b7aa6f --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Framework.Versions.html @@ -0,0 +1,122 @@ +
                              {- This file was auto-generated from tensorflow/core/framework/versions.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Framework.Versions where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data VersionDef = VersionDef{_VersionDef'producer ::
                              +                             !Data.Int.Int32,
                              +                             _VersionDef'minConsumer :: !Data.Int.Int32,
                              +                             _VersionDef'badConsumers :: ![Data.Int.Int32]}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "producer" f VersionDef VersionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VersionDef'producer
                              +                 (\ x__ y__ -> x__{_VersionDef'producer = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "minConsumer" f VersionDef VersionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VersionDef'minConsumer
                              +                 (\ x__ y__ -> x__{_VersionDef'minConsumer = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Int.Int32], b ~ [Data.Int.Int32],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "badConsumers" f VersionDef VersionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _VersionDef'badConsumers
                              +                 (\ x__ y__ -> x__{_VersionDef'badConsumers = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default VersionDef where
                              +        def
                              +          = VersionDef{_VersionDef'producer = Data.ProtoLens.fieldDefault,
                              +                       _VersionDef'minConsumer = Data.ProtoLens.fieldDefault,
                              +                       _VersionDef'badConsumers = []}
                              +
                              +instance Data.ProtoLens.Message VersionDef where
                              +        descriptor
                              +          = let producer__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "producer"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional producer)
                              +                      :: Data.ProtoLens.FieldDescriptor VersionDef
                              +                minConsumer__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "min_consumer"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional minConsumer)
                              +                      :: Data.ProtoLens.FieldDescriptor VersionDef
                              +                badConsumers__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bad_consumers"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed badConsumers)
                              +                      :: Data.ProtoLens.FieldDescriptor VersionDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.VersionDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, producer__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, minConsumer__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, badConsumers__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("producer", producer__field_descriptor),
                              +                    ("min_consumer", minConsumer__field_descriptor),
                              +                    ("bad_consumers", badConsumers__field_descriptor)])
                              +
                              +badConsumers ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "badConsumers" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +badConsumers
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "badConsumers")
                              +
                              +minConsumer ::
                              +            forall f s t a b . (Lens.Labels.HasLens "minConsumer" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +minConsumer
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "minConsumer")
                              +
                              +producer ::
                              +         forall f s t a b . (Lens.Labels.HasLens "producer" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +producer
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "producer")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Lib.Core.ErrorCodes.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Lib.Core.ErrorCodes.html new file mode 100644 index 0000000..1cf3da2 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Lib.Core.ErrorCodes.html @@ -0,0 +1,199 @@ +
                              {- This file was auto-generated from tensorflow/core/lib/core/error_codes.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Lib.Core.ErrorCodes where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data Code = OK
                              +          | CANCELLED
                              +          | UNKNOWN
                              +          | INVALID_ARGUMENT
                              +          | DEADLINE_EXCEEDED
                              +          | NOT_FOUND
                              +          | ALREADY_EXISTS
                              +          | PERMISSION_DENIED
                              +          | RESOURCE_EXHAUSTED
                              +          | FAILED_PRECONDITION
                              +          | ABORTED
                              +          | OUT_OF_RANGE
                              +          | UNIMPLEMENTED
                              +          | INTERNAL
                              +          | UNAVAILABLE
                              +          | DATA_LOSS
                              +          | UNAUTHENTICATED
                              +          | DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              +          deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default Code where
                              +        def = OK
                              +
                              +instance Data.ProtoLens.FieldDefault Code where
                              +        fieldDefault = OK
                              +
                              +instance Data.ProtoLens.MessageEnum Code where
                              +        maybeToEnum 0 = Prelude.Just OK
                              +        maybeToEnum 1 = Prelude.Just CANCELLED
                              +        maybeToEnum 2 = Prelude.Just UNKNOWN
                              +        maybeToEnum 3 = Prelude.Just INVALID_ARGUMENT
                              +        maybeToEnum 4 = Prelude.Just DEADLINE_EXCEEDED
                              +        maybeToEnum 5 = Prelude.Just NOT_FOUND
                              +        maybeToEnum 6 = Prelude.Just ALREADY_EXISTS
                              +        maybeToEnum 7 = Prelude.Just PERMISSION_DENIED
                              +        maybeToEnum 8 = Prelude.Just RESOURCE_EXHAUSTED
                              +        maybeToEnum 9 = Prelude.Just FAILED_PRECONDITION
                              +        maybeToEnum 10 = Prelude.Just ABORTED
                              +        maybeToEnum 11 = Prelude.Just OUT_OF_RANGE
                              +        maybeToEnum 12 = Prelude.Just UNIMPLEMENTED
                              +        maybeToEnum 13 = Prelude.Just INTERNAL
                              +        maybeToEnum 14 = Prelude.Just UNAVAILABLE
                              +        maybeToEnum 15 = Prelude.Just DATA_LOSS
                              +        maybeToEnum 16 = Prelude.Just UNAUTHENTICATED
                              +        maybeToEnum 20
                              +          = Prelude.Just
                              +              DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum OK = "OK"
                              +        showEnum CANCELLED = "CANCELLED"
                              +        showEnum UNKNOWN = "UNKNOWN"
                              +        showEnum INVALID_ARGUMENT = "INVALID_ARGUMENT"
                              +        showEnum DEADLINE_EXCEEDED = "DEADLINE_EXCEEDED"
                              +        showEnum NOT_FOUND = "NOT_FOUND"
                              +        showEnum ALREADY_EXISTS = "ALREADY_EXISTS"
                              +        showEnum PERMISSION_DENIED = "PERMISSION_DENIED"
                              +        showEnum RESOURCE_EXHAUSTED = "RESOURCE_EXHAUSTED"
                              +        showEnum FAILED_PRECONDITION = "FAILED_PRECONDITION"
                              +        showEnum ABORTED = "ABORTED"
                              +        showEnum OUT_OF_RANGE = "OUT_OF_RANGE"
                              +        showEnum UNIMPLEMENTED = "UNIMPLEMENTED"
                              +        showEnum INTERNAL = "INTERNAL"
                              +        showEnum UNAVAILABLE = "UNAVAILABLE"
                              +        showEnum DATA_LOSS = "DATA_LOSS"
                              +        showEnum UNAUTHENTICATED = "UNAUTHENTICATED"
                              +        showEnum
                              +          DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              +          = "DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_"
                              +        readEnum "OK" = Prelude.Just OK
                              +        readEnum "CANCELLED" = Prelude.Just CANCELLED
                              +        readEnum "UNKNOWN" = Prelude.Just UNKNOWN
                              +        readEnum "INVALID_ARGUMENT" = Prelude.Just INVALID_ARGUMENT
                              +        readEnum "DEADLINE_EXCEEDED" = Prelude.Just DEADLINE_EXCEEDED
                              +        readEnum "NOT_FOUND" = Prelude.Just NOT_FOUND
                              +        readEnum "ALREADY_EXISTS" = Prelude.Just ALREADY_EXISTS
                              +        readEnum "PERMISSION_DENIED" = Prelude.Just PERMISSION_DENIED
                              +        readEnum "RESOURCE_EXHAUSTED" = Prelude.Just RESOURCE_EXHAUSTED
                              +        readEnum "FAILED_PRECONDITION" = Prelude.Just FAILED_PRECONDITION
                              +        readEnum "ABORTED" = Prelude.Just ABORTED
                              +        readEnum "OUT_OF_RANGE" = Prelude.Just OUT_OF_RANGE
                              +        readEnum "UNIMPLEMENTED" = Prelude.Just UNIMPLEMENTED
                              +        readEnum "INTERNAL" = Prelude.Just INTERNAL
                              +        readEnum "UNAVAILABLE" = Prelude.Just UNAVAILABLE
                              +        readEnum "DATA_LOSS" = Prelude.Just DATA_LOSS
                              +        readEnum "UNAUTHENTICATED" = Prelude.Just UNAUTHENTICATED
                              +        readEnum
                              +          "DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_"
                              +          = Prelude.Just
                              +              DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum Code where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum Code: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum OK = 0
                              +        fromEnum CANCELLED = 1
                              +        fromEnum UNKNOWN = 2
                              +        fromEnum INVALID_ARGUMENT = 3
                              +        fromEnum DEADLINE_EXCEEDED = 4
                              +        fromEnum NOT_FOUND = 5
                              +        fromEnum ALREADY_EXISTS = 6
                              +        fromEnum PERMISSION_DENIED = 7
                              +        fromEnum RESOURCE_EXHAUSTED = 8
                              +        fromEnum FAILED_PRECONDITION = 9
                              +        fromEnum ABORTED = 10
                              +        fromEnum OUT_OF_RANGE = 11
                              +        fromEnum UNIMPLEMENTED = 12
                              +        fromEnum INTERNAL = 13
                              +        fromEnum UNAVAILABLE = 14
                              +        fromEnum DATA_LOSS = 15
                              +        fromEnum UNAUTHENTICATED = 16
                              +        fromEnum
                              +          DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              +          = 20
                              +        succ
                              +          DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              +          = Prelude.error
                              +              "Code.succ: bad argument DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_. This value would be out of bounds."
                              +        succ OK = CANCELLED
                              +        succ CANCELLED = UNKNOWN
                              +        succ UNKNOWN = INVALID_ARGUMENT
                              +        succ INVALID_ARGUMENT = DEADLINE_EXCEEDED
                              +        succ DEADLINE_EXCEEDED = NOT_FOUND
                              +        succ NOT_FOUND = ALREADY_EXISTS
                              +        succ ALREADY_EXISTS = PERMISSION_DENIED
                              +        succ PERMISSION_DENIED = RESOURCE_EXHAUSTED
                              +        succ RESOURCE_EXHAUSTED = FAILED_PRECONDITION
                              +        succ FAILED_PRECONDITION = ABORTED
                              +        succ ABORTED = OUT_OF_RANGE
                              +        succ OUT_OF_RANGE = UNIMPLEMENTED
                              +        succ UNIMPLEMENTED = INTERNAL
                              +        succ INTERNAL = UNAVAILABLE
                              +        succ UNAVAILABLE = DATA_LOSS
                              +        succ DATA_LOSS = UNAUTHENTICATED
                              +        succ UNAUTHENTICATED
                              +          = DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              +        pred OK
                              +          = Prelude.error
                              +              "Code.pred: bad argument OK. This value would be out of bounds."
                              +        pred CANCELLED = OK
                              +        pred UNKNOWN = CANCELLED
                              +        pred INVALID_ARGUMENT = UNKNOWN
                              +        pred DEADLINE_EXCEEDED = INVALID_ARGUMENT
                              +        pred NOT_FOUND = DEADLINE_EXCEEDED
                              +        pred ALREADY_EXISTS = NOT_FOUND
                              +        pred PERMISSION_DENIED = ALREADY_EXISTS
                              +        pred RESOURCE_EXHAUSTED = PERMISSION_DENIED
                              +        pred FAILED_PRECONDITION = RESOURCE_EXHAUSTED
                              +        pred ABORTED = FAILED_PRECONDITION
                              +        pred OUT_OF_RANGE = ABORTED
                              +        pred UNIMPLEMENTED = OUT_OF_RANGE
                              +        pred INTERNAL = UNIMPLEMENTED
                              +        pred UNAVAILABLE = INTERNAL
                              +        pred DATA_LOSS = UNAVAILABLE
                              +        pred UNAUTHENTICATED = DATA_LOSS
                              +        pred
                              +          DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              +          = UNAUTHENTICATED
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded Code where
                              +        minBound = OK
                              +        maxBound
                              +          = DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Cluster.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Cluster.html new file mode 100644 index 0000000..c9982a1 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Cluster.html @@ -0,0 +1,200 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/cluster.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.Cluster where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data ClusterDef = ClusterDef{_ClusterDef'job :: ![JobDef]}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [JobDef], b ~ [JobDef], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "job" f ClusterDef ClusterDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ClusterDef'job
                              +                 (\ x__ y__ -> x__{_ClusterDef'job = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ClusterDef where
                              +        def = ClusterDef{_ClusterDef'job = []}
                              +
                              +instance Data.ProtoLens.Message ClusterDef where
                              +        descriptor
                              +          = let job__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "job"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor JobDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked job)
                              +                      :: Data.ProtoLens.FieldDescriptor ClusterDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ClusterDef")
                              +                (Data.Map.fromList [(Data.ProtoLens.Tag 1, job__field_descriptor)])
                              +                (Data.Map.fromList [("job", job__field_descriptor)])
                              +
                              +data JobDef = JobDef{_JobDef'name :: !Data.Text.Text,
                              +                     _JobDef'tasks :: !(Data.Map.Map Data.Int.Int32 Data.Text.Text)}
                              +            deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f JobDef JobDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _JobDef'name
                              +                 (\ x__ y__ -> x__{_JobDef'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Int.Int32 Data.Text.Text,
                              +          b ~ Data.Map.Map Data.Int.Int32 Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tasks" f JobDef JobDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _JobDef'tasks
                              +                 (\ x__ y__ -> x__{_JobDef'tasks = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default JobDef where
                              +        def
                              +          = JobDef{_JobDef'name = Data.ProtoLens.fieldDefault,
                              +                   _JobDef'tasks = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message JobDef where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor JobDef
                              +                tasks__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tasks"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor JobDef'TasksEntry)
                              +                      (Data.ProtoLens.MapField key value tasks)
                              +                      :: Data.ProtoLens.FieldDescriptor JobDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.JobDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, tasks__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("tasks", tasks__field_descriptor)])
                              +
                              +data JobDef'TasksEntry = JobDef'TasksEntry{_JobDef'TasksEntry'key
                              +                                           :: !Data.Int.Int32,
                              +                                           _JobDef'TasksEntry'value :: !Data.Text.Text}
                              +                       deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f JobDef'TasksEntry JobDef'TasksEntry a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _JobDef'TasksEntry'key
                              +                 (\ x__ y__ -> x__{_JobDef'TasksEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f JobDef'TasksEntry JobDef'TasksEntry a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _JobDef'TasksEntry'value
                              +                 (\ x__ y__ -> x__{_JobDef'TasksEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default JobDef'TasksEntry where
                              +        def
                              +          = JobDef'TasksEntry{_JobDef'TasksEntry'key =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _JobDef'TasksEntry'value = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message JobDef'TasksEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor JobDef'TasksEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
                              +                      :: Data.ProtoLens.FieldDescriptor JobDef'TasksEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.JobDef.TasksEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +job ::
                              +    forall f s t a b . (Lens.Labels.HasLens "job" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +job
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "job")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +tasks ::
                              +      forall f s t a b . (Lens.Labels.HasLens "tasks" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +tasks
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tasks")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Config.html new file mode 100644 index 0000000..d764c4f --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Config.html @@ -0,0 +1,2048 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/config.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.Config where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.CostGraph
                              +import qualified Proto.Tensorflow.Core.Framework.Graph
                              +import qualified Proto.Tensorflow.Core.Framework.StepStats
                              +import qualified Proto.Tensorflow.Core.Protobuf.Cluster
                              +import qualified Proto.Tensorflow.Core.Protobuf.Debug
                              +import qualified Proto.Tensorflow.Core.Protobuf.RewriterConfig
                              +
                              +data ConfigProto = ConfigProto{_ConfigProto'deviceCount ::
                              +                               !(Data.Map.Map Data.Text.Text Data.Int.Int32),
                              +                               _ConfigProto'intraOpParallelismThreads :: !Data.Int.Int32,
                              +                               _ConfigProto'interOpParallelismThreads :: !Data.Int.Int32,
                              +                               _ConfigProto'usePerSessionThreads :: !Prelude.Bool,
                              +                               _ConfigProto'sessionInterOpThreadPool :: ![ThreadPoolOptionProto],
                              +                               _ConfigProto'placementPeriod :: !Data.Int.Int32,
                              +                               _ConfigProto'deviceFilters :: ![Data.Text.Text],
                              +                               _ConfigProto'gpuOptions :: !(Prelude.Maybe GPUOptions),
                              +                               _ConfigProto'allowSoftPlacement :: !Prelude.Bool,
                              +                               _ConfigProto'logDevicePlacement :: !Prelude.Bool,
                              +                               _ConfigProto'graphOptions :: !(Prelude.Maybe GraphOptions),
                              +                               _ConfigProto'operationTimeoutInMs :: !Data.Int.Int64,
                              +                               _ConfigProto'rpcOptions :: !(Prelude.Maybe RPCOptions),
                              +                               _ConfigProto'clusterDef ::
                              +                               !(Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef)}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text Data.Int.Int32,
                              +          b ~ Data.Map.Map Data.Text.Text Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deviceCount" f ConfigProto ConfigProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'deviceCount
                              +                 (\ x__ y__ -> x__{_ConfigProto'deviceCount = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "intraOpParallelismThreads" f ConfigProto
                              +           ConfigProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'intraOpParallelismThreads
                              +                 (\ x__ y__ -> x__{_ConfigProto'intraOpParallelismThreads = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "interOpParallelismThreads" f ConfigProto
                              +           ConfigProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'interOpParallelismThreads
                              +                 (\ x__ y__ -> x__{_ConfigProto'interOpParallelismThreads = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "usePerSessionThreads" f ConfigProto
                              +           ConfigProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'usePerSessionThreads
                              +                 (\ x__ y__ -> x__{_ConfigProto'usePerSessionThreads = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [ThreadPoolOptionProto], b ~ [ThreadPoolOptionProto],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "sessionInterOpThreadPool" f ConfigProto
                              +           ConfigProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'sessionInterOpThreadPool
                              +                 (\ x__ y__ -> x__{_ConfigProto'sessionInterOpThreadPool = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "placementPeriod" f ConfigProto ConfigProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'placementPeriod
                              +                 (\ x__ y__ -> x__{_ConfigProto'placementPeriod = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deviceFilters" f ConfigProto ConfigProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'deviceFilters
                              +                 (\ x__ y__ -> x__{_ConfigProto'deviceFilters = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ GPUOptions, b ~ GPUOptions, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "gpuOptions" f ConfigProto ConfigProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'gpuOptions
                              +                 (\ x__ y__ -> x__{_ConfigProto'gpuOptions = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe GPUOptions,
                              +          b ~ Prelude.Maybe GPUOptions, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'gpuOptions" f ConfigProto ConfigProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'gpuOptions
                              +                 (\ x__ y__ -> x__{_ConfigProto'gpuOptions = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allowSoftPlacement" f ConfigProto ConfigProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'allowSoftPlacement
                              +                 (\ x__ y__ -> x__{_ConfigProto'allowSoftPlacement = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "logDevicePlacement" f ConfigProto ConfigProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'logDevicePlacement
                              +                 (\ x__ y__ -> x__{_ConfigProto'logDevicePlacement = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ GraphOptions, b ~ GraphOptions, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "graphOptions" f ConfigProto ConfigProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'graphOptions
                              +                 (\ x__ y__ -> x__{_ConfigProto'graphOptions = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe GraphOptions,
                              +          b ~ Prelude.Maybe GraphOptions, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'graphOptions" f ConfigProto ConfigProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'graphOptions
                              +                 (\ x__ y__ -> x__{_ConfigProto'graphOptions = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "operationTimeoutInMs" f ConfigProto
                              +           ConfigProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'operationTimeoutInMs
                              +                 (\ x__ y__ -> x__{_ConfigProto'operationTimeoutInMs = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ RPCOptions, b ~ RPCOptions, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "rpcOptions" f ConfigProto ConfigProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'rpcOptions
                              +                 (\ x__ y__ -> x__{_ConfigProto'rpcOptions = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe RPCOptions,
                              +          b ~ Prelude.Maybe RPCOptions, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'rpcOptions" f ConfigProto ConfigProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'rpcOptions
                              +                 (\ x__ y__ -> x__{_ConfigProto'rpcOptions = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef,
                              +          b ~ Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "clusterDef" f ConfigProto ConfigProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'clusterDef
                              +                 (\ x__ y__ -> x__{_ConfigProto'clusterDef = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'clusterDef" f ConfigProto ConfigProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'clusterDef
                              +                 (\ x__ y__ -> x__{_ConfigProto'clusterDef = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ConfigProto where
                              +        def
                              +          = ConfigProto{_ConfigProto'deviceCount = Data.Map.empty,
                              +                        _ConfigProto'intraOpParallelismThreads =
                              +                          Data.ProtoLens.fieldDefault,
                              +                        _ConfigProto'interOpParallelismThreads =
                              +                          Data.ProtoLens.fieldDefault,
                              +                        _ConfigProto'usePerSessionThreads = Data.ProtoLens.fieldDefault,
                              +                        _ConfigProto'sessionInterOpThreadPool = [],
                              +                        _ConfigProto'placementPeriod = Data.ProtoLens.fieldDefault,
                              +                        _ConfigProto'deviceFilters = [],
                              +                        _ConfigProto'gpuOptions = Prelude.Nothing,
                              +                        _ConfigProto'allowSoftPlacement = Data.ProtoLens.fieldDefault,
                              +                        _ConfigProto'logDevicePlacement = Data.ProtoLens.fieldDefault,
                              +                        _ConfigProto'graphOptions = Prelude.Nothing,
                              +                        _ConfigProto'operationTimeoutInMs = Data.ProtoLens.fieldDefault,
                              +                        _ConfigProto'rpcOptions = Prelude.Nothing,
                              +                        _ConfigProto'clusterDef = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message ConfigProto where
                              +        descriptor
                              +          = let deviceCount__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_count"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor ConfigProto'DeviceCountEntry)
                              +                      (Data.ProtoLens.MapField key value deviceCount)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                intraOpParallelismThreads__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "intra_op_parallelism_threads"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         intraOpParallelismThreads)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                interOpParallelismThreads__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "inter_op_parallelism_threads"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         interOpParallelismThreads)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                usePerSessionThreads__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "use_per_session_threads"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         usePerSessionThreads)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                sessionInterOpThreadPool__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "session_inter_op_thread_pool"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor ThreadPoolOptionProto)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         sessionInterOpThreadPool)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                placementPeriod__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "placement_period"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional placementPeriod)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                deviceFilters__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_filters"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         deviceFilters)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                gpuOptions__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "gpu_options"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor GPUOptions)
                              +                      (Data.ProtoLens.OptionalField maybe'gpuOptions)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                allowSoftPlacement__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allow_soft_placement"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         allowSoftPlacement)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                logDevicePlacement__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "log_device_placement"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         logDevicePlacement)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                graphOptions__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "graph_options"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor GraphOptions)
                              +                      (Data.ProtoLens.OptionalField maybe'graphOptions)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                operationTimeoutInMs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "operation_timeout_in_ms"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         operationTimeoutInMs)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                rpcOptions__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "rpc_options"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor RPCOptions)
                              +                      (Data.ProtoLens.OptionalField maybe'rpcOptions)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +                clusterDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cluster_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef)
                              +                      (Data.ProtoLens.OptionalField maybe'clusterDef)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ConfigProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, deviceCount__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2,
                              +                     intraOpParallelismThreads__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5,
                              +                     interOpParallelismThreads__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, usePerSessionThreads__field_descriptor),
                              +                    (Data.ProtoLens.Tag 12,
                              +                     sessionInterOpThreadPool__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, placementPeriod__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, deviceFilters__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, gpuOptions__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, allowSoftPlacement__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, logDevicePlacement__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, graphOptions__field_descriptor),
                              +                    (Data.ProtoLens.Tag 11, operationTimeoutInMs__field_descriptor),
                              +                    (Data.ProtoLens.Tag 13, rpcOptions__field_descriptor),
                              +                    (Data.ProtoLens.Tag 14, clusterDef__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("device_count", deviceCount__field_descriptor),
                              +                    ("intra_op_parallelism_threads",
                              +                     intraOpParallelismThreads__field_descriptor),
                              +                    ("inter_op_parallelism_threads",
                              +                     interOpParallelismThreads__field_descriptor),
                              +                    ("use_per_session_threads",
                              +                     usePerSessionThreads__field_descriptor),
                              +                    ("session_inter_op_thread_pool",
                              +                     sessionInterOpThreadPool__field_descriptor),
                              +                    ("placement_period", placementPeriod__field_descriptor),
                              +                    ("device_filters", deviceFilters__field_descriptor),
                              +                    ("gpu_options", gpuOptions__field_descriptor),
                              +                    ("allow_soft_placement", allowSoftPlacement__field_descriptor),
                              +                    ("log_device_placement", logDevicePlacement__field_descriptor),
                              +                    ("graph_options", graphOptions__field_descriptor),
                              +                    ("operation_timeout_in_ms",
                              +                     operationTimeoutInMs__field_descriptor),
                              +                    ("rpc_options", rpcOptions__field_descriptor),
                              +                    ("cluster_def", clusterDef__field_descriptor)])
                              +
                              +data ConfigProto'DeviceCountEntry = ConfigProto'DeviceCountEntry{_ConfigProto'DeviceCountEntry'key
                              +                                                                 :: !Data.Text.Text,
                              +                                                                 _ConfigProto'DeviceCountEntry'value
                              +                                                                 :: !Data.Int.Int32}
                              +                                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f ConfigProto'DeviceCountEntry
                              +           ConfigProto'DeviceCountEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'DeviceCountEntry'key
                              +                 (\ x__ y__ -> x__{_ConfigProto'DeviceCountEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f ConfigProto'DeviceCountEntry
                              +           ConfigProto'DeviceCountEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ConfigProto'DeviceCountEntry'value
                              +                 (\ x__ y__ -> x__{_ConfigProto'DeviceCountEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ConfigProto'DeviceCountEntry
                              +         where
                              +        def
                              +          = ConfigProto'DeviceCountEntry{_ConfigProto'DeviceCountEntry'key =
                              +                                           Data.ProtoLens.fieldDefault,
                              +                                         _ConfigProto'DeviceCountEntry'value =
                              +                                           Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message ConfigProto'DeviceCountEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto'DeviceCountEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
                              +                      :: Data.ProtoLens.FieldDescriptor ConfigProto'DeviceCountEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ConfigProto.DeviceCountEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data GPUOptions = GPUOptions{_GPUOptions'perProcessGpuMemoryFraction
                              +                             :: !Prelude.Double,
                              +                             _GPUOptions'allocatorType :: !Data.Text.Text,
                              +                             _GPUOptions'deferredDeletionBytes :: !Data.Int.Int64,
                              +                             _GPUOptions'allowGrowth :: !Prelude.Bool,
                              +                             _GPUOptions'visibleDeviceList :: !Data.Text.Text,
                              +                             _GPUOptions'pollingActiveDelayUsecs :: !Data.Int.Int32,
                              +                             _GPUOptions'pollingInactiveDelayMsecs :: !Data.Int.Int32,
                              +                             _GPUOptions'forceGpuCompatible :: !Prelude.Bool}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "perProcessGpuMemoryFraction" f GPUOptions
                              +           GPUOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _GPUOptions'perProcessGpuMemoryFraction
                              +                 (\ x__ y__ -> x__{_GPUOptions'perProcessGpuMemoryFraction = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allocatorType" f GPUOptions GPUOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUOptions'allocatorType
                              +                 (\ x__ y__ -> x__{_GPUOptions'allocatorType = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deferredDeletionBytes" f GPUOptions GPUOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUOptions'deferredDeletionBytes
                              +                 (\ x__ y__ -> x__{_GPUOptions'deferredDeletionBytes = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "allowGrowth" f GPUOptions GPUOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUOptions'allowGrowth
                              +                 (\ x__ y__ -> x__{_GPUOptions'allowGrowth = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "visibleDeviceList" f GPUOptions GPUOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUOptions'visibleDeviceList
                              +                 (\ x__ y__ -> x__{_GPUOptions'visibleDeviceList = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "pollingActiveDelayUsecs" f GPUOptions
                              +           GPUOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUOptions'pollingActiveDelayUsecs
                              +                 (\ x__ y__ -> x__{_GPUOptions'pollingActiveDelayUsecs = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "pollingInactiveDelayMsecs" f GPUOptions
                              +           GPUOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUOptions'pollingInactiveDelayMsecs
                              +                 (\ x__ y__ -> x__{_GPUOptions'pollingInactiveDelayMsecs = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "forceGpuCompatible" f GPUOptions GPUOptions a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUOptions'forceGpuCompatible
                              +                 (\ x__ y__ -> x__{_GPUOptions'forceGpuCompatible = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default GPUOptions where
                              +        def
                              +          = GPUOptions{_GPUOptions'perProcessGpuMemoryFraction =
                              +                         Data.ProtoLens.fieldDefault,
                              +                       _GPUOptions'allocatorType = Data.ProtoLens.fieldDefault,
                              +                       _GPUOptions'deferredDeletionBytes = Data.ProtoLens.fieldDefault,
                              +                       _GPUOptions'allowGrowth = Data.ProtoLens.fieldDefault,
                              +                       _GPUOptions'visibleDeviceList = Data.ProtoLens.fieldDefault,
                              +                       _GPUOptions'pollingActiveDelayUsecs = Data.ProtoLens.fieldDefault,
                              +                       _GPUOptions'pollingInactiveDelayMsecs =
                              +                         Data.ProtoLens.fieldDefault,
                              +                       _GPUOptions'forceGpuCompatible = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message GPUOptions where
                              +        descriptor
                              +          = let perProcessGpuMemoryFraction__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "per_process_gpu_memory_fraction"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         perProcessGpuMemoryFraction)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUOptions
                              +                allocatorType__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allocator_type"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorType)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUOptions
                              +                deferredDeletionBytes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "deferred_deletion_bytes"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         deferredDeletionBytes)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUOptions
                              +                allowGrowth__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "allow_growth"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allowGrowth)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUOptions
                              +                visibleDeviceList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "visible_device_list"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         visibleDeviceList)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUOptions
                              +                pollingActiveDelayUsecs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "polling_active_delay_usecs"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         pollingActiveDelayUsecs)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUOptions
                              +                pollingInactiveDelayMsecs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "polling_inactive_delay_msecs"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         pollingInactiveDelayMsecs)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUOptions
                              +                forceGpuCompatible__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "force_gpu_compatible"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         forceGpuCompatible)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUOptions
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.GPUOptions")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1,
                              +                     perProcessGpuMemoryFraction__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, allocatorType__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, deferredDeletionBytes__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, allowGrowth__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, visibleDeviceList__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, pollingActiveDelayUsecs__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7,
                              +                     pollingInactiveDelayMsecs__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, forceGpuCompatible__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("per_process_gpu_memory_fraction",
                              +                     perProcessGpuMemoryFraction__field_descriptor),
                              +                    ("allocator_type", allocatorType__field_descriptor),
                              +                    ("deferred_deletion_bytes",
                              +                     deferredDeletionBytes__field_descriptor),
                              +                    ("allow_growth", allowGrowth__field_descriptor),
                              +                    ("visible_device_list", visibleDeviceList__field_descriptor),
                              +                    ("polling_active_delay_usecs",
                              +                     pollingActiveDelayUsecs__field_descriptor),
                              +                    ("polling_inactive_delay_msecs",
                              +                     pollingInactiveDelayMsecs__field_descriptor),
                              +                    ("force_gpu_compatible", forceGpuCompatible__field_descriptor)])
                              +
                              +data GraphOptions = GraphOptions{_GraphOptions'enableRecvScheduling
                              +                                 :: !Prelude.Bool,
                              +                                 _GraphOptions'optimizerOptions ::
                              +                                 !(Prelude.Maybe OptimizerOptions),
                              +                                 _GraphOptions'buildCostModel :: !Data.Int.Int64,
                              +                                 _GraphOptions'buildCostModelAfter :: !Data.Int.Int64,
                              +                                 _GraphOptions'inferShapes :: !Prelude.Bool,
                              +                                 _GraphOptions'placePrunedGraph :: !Prelude.Bool,
                              +                                 _GraphOptions'enableBfloat16Sendrecv :: !Prelude.Bool,
                              +                                 _GraphOptions'timelineStep :: !Data.Int.Int32,
                              +                                 _GraphOptions'rewriteOptions ::
                              +                                 !(Prelude.Maybe
                              +                                     Proto.Tensorflow.Core.Protobuf.RewriterConfig.RewriterConfig)}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "enableRecvScheduling" f GraphOptions
                              +           GraphOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'enableRecvScheduling
                              +                 (\ x__ y__ -> x__{_GraphOptions'enableRecvScheduling = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ OptimizerOptions, b ~ OptimizerOptions,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "optimizerOptions" f GraphOptions GraphOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'optimizerOptions
                              +                 (\ x__ y__ -> x__{_GraphOptions'optimizerOptions = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe OptimizerOptions,
                              +          b ~ Prelude.Maybe OptimizerOptions, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'optimizerOptions" f GraphOptions
                              +           GraphOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'optimizerOptions
                              +                 (\ x__ y__ -> x__{_GraphOptions'optimizerOptions = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "buildCostModel" f GraphOptions GraphOptions a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'buildCostModel
                              +                 (\ x__ y__ -> x__{_GraphOptions'buildCostModel = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "buildCostModelAfter" f GraphOptions
                              +           GraphOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'buildCostModelAfter
                              +                 (\ x__ y__ -> x__{_GraphOptions'buildCostModelAfter = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "inferShapes" f GraphOptions GraphOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'inferShapes
                              +                 (\ x__ y__ -> x__{_GraphOptions'inferShapes = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "placePrunedGraph" f GraphOptions GraphOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'placePrunedGraph
                              +                 (\ x__ y__ -> x__{_GraphOptions'placePrunedGraph = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "enableBfloat16Sendrecv" f GraphOptions
                              +           GraphOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'enableBfloat16Sendrecv
                              +                 (\ x__ y__ -> x__{_GraphOptions'enableBfloat16Sendrecv = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "timelineStep" f GraphOptions GraphOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'timelineStep
                              +                 (\ x__ y__ -> x__{_GraphOptions'timelineStep = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Protobuf.RewriterConfig.RewriterConfig,
                              +          b ~ Proto.Tensorflow.Core.Protobuf.RewriterConfig.RewriterConfig,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "rewriteOptions" f GraphOptions GraphOptions a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'rewriteOptions
                              +                 (\ x__ y__ -> x__{_GraphOptions'rewriteOptions = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Protobuf.RewriterConfig.RewriterConfig,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Protobuf.RewriterConfig.RewriterConfig,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'rewriteOptions" f GraphOptions
                              +           GraphOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GraphOptions'rewriteOptions
                              +                 (\ x__ y__ -> x__{_GraphOptions'rewriteOptions = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default GraphOptions where
                              +        def
                              +          = GraphOptions{_GraphOptions'enableRecvScheduling =
                              +                           Data.ProtoLens.fieldDefault,
                              +                         _GraphOptions'optimizerOptions = Prelude.Nothing,
                              +                         _GraphOptions'buildCostModel = Data.ProtoLens.fieldDefault,
                              +                         _GraphOptions'buildCostModelAfter = Data.ProtoLens.fieldDefault,
                              +                         _GraphOptions'inferShapes = Data.ProtoLens.fieldDefault,
                              +                         _GraphOptions'placePrunedGraph = Data.ProtoLens.fieldDefault,
                              +                         _GraphOptions'enableBfloat16Sendrecv = Data.ProtoLens.fieldDefault,
                              +                         _GraphOptions'timelineStep = Data.ProtoLens.fieldDefault,
                              +                         _GraphOptions'rewriteOptions = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message GraphOptions where
                              +        descriptor
                              +          = let enableRecvScheduling__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "enable_recv_scheduling"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         enableRecvScheduling)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +                optimizerOptions__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "optimizer_options"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor OptimizerOptions)
                              +                      (Data.ProtoLens.OptionalField maybe'optimizerOptions)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +                buildCostModel__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "build_cost_model"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional buildCostModel)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +                buildCostModelAfter__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "build_cost_model_after"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         buildCostModelAfter)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +                inferShapes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "infer_shapes"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional inferShapes)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +                placePrunedGraph__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "place_pruned_graph"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         placePrunedGraph)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +                enableBfloat16Sendrecv__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "enable_bfloat16_sendrecv"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         enableBfloat16Sendrecv)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +                timelineStep__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "timeline_step"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timelineStep)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +                rewriteOptions__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "rewrite_options"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Protobuf.RewriterConfig.RewriterConfig)
                              +                      (Data.ProtoLens.OptionalField maybe'rewriteOptions)
                              +                      :: Data.ProtoLens.FieldDescriptor GraphOptions
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.GraphOptions")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 2, enableRecvScheduling__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, optimizerOptions__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, buildCostModel__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, buildCostModelAfter__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, inferShapes__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, placePrunedGraph__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, enableBfloat16Sendrecv__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, timelineStep__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, rewriteOptions__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("enable_recv_scheduling",
                              +                     enableRecvScheduling__field_descriptor),
                              +                    ("optimizer_options", optimizerOptions__field_descriptor),
                              +                    ("build_cost_model", buildCostModel__field_descriptor),
                              +                    ("build_cost_model_after", buildCostModelAfter__field_descriptor),
                              +                    ("infer_shapes", inferShapes__field_descriptor),
                              +                    ("place_pruned_graph", placePrunedGraph__field_descriptor),
                              +                    ("enable_bfloat16_sendrecv",
                              +                     enableBfloat16Sendrecv__field_descriptor),
                              +                    ("timeline_step", timelineStep__field_descriptor),
                              +                    ("rewrite_options", rewriteOptions__field_descriptor)])
                              +
                              +data OptimizerOptions = OptimizerOptions{_OptimizerOptions'doCommonSubexpressionElimination
                              +                                         :: !Prelude.Bool,
                              +                                         _OptimizerOptions'doConstantFolding :: !Prelude.Bool,
                              +                                         _OptimizerOptions'doFunctionInlining :: !Prelude.Bool,
                              +                                         _OptimizerOptions'optLevel :: !OptimizerOptions'Level,
                              +                                         _OptimizerOptions'globalJitLevel ::
                              +                                         !OptimizerOptions'GlobalJitLevel}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "doCommonSubexpressionElimination" f
                              +           OptimizerOptions
                              +           OptimizerOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _OptimizerOptions'doCommonSubexpressionElimination
                              +                 (\ x__ y__ ->
                              +                    x__{_OptimizerOptions'doCommonSubexpressionElimination = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "doConstantFolding" f OptimizerOptions
                              +           OptimizerOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OptimizerOptions'doConstantFolding
                              +                 (\ x__ y__ -> x__{_OptimizerOptions'doConstantFolding = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "doFunctionInlining" f OptimizerOptions
                              +           OptimizerOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OptimizerOptions'doFunctionInlining
                              +                 (\ x__ y__ -> x__{_OptimizerOptions'doFunctionInlining = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ OptimizerOptions'Level, b ~ OptimizerOptions'Level,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "optLevel" f OptimizerOptions OptimizerOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OptimizerOptions'optLevel
                              +                 (\ x__ y__ -> x__{_OptimizerOptions'optLevel = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ OptimizerOptions'GlobalJitLevel,
                              +          b ~ OptimizerOptions'GlobalJitLevel, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "globalJitLevel" f OptimizerOptions
                              +           OptimizerOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _OptimizerOptions'globalJitLevel
                              +                 (\ x__ y__ -> x__{_OptimizerOptions'globalJitLevel = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default OptimizerOptions where
                              +        def
                              +          = OptimizerOptions{_OptimizerOptions'doCommonSubexpressionElimination
                              +                               = Data.ProtoLens.fieldDefault,
                              +                             _OptimizerOptions'doConstantFolding = Data.ProtoLens.fieldDefault,
                              +                             _OptimizerOptions'doFunctionInlining = Data.ProtoLens.fieldDefault,
                              +                             _OptimizerOptions'optLevel = Data.Default.Class.def,
                              +                             _OptimizerOptions'globalJitLevel = Data.Default.Class.def}
                              +
                              +instance Data.ProtoLens.Message OptimizerOptions where
                              +        descriptor
                              +          = let doCommonSubexpressionElimination__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor
                              +                      "do_common_subexpression_elimination"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         doCommonSubexpressionElimination)
                              +                      :: Data.ProtoLens.FieldDescriptor OptimizerOptions
                              +                doConstantFolding__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "do_constant_folding"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         doConstantFolding)
                              +                      :: Data.ProtoLens.FieldDescriptor OptimizerOptions
                              +                doFunctionInlining__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "do_function_inlining"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         doFunctionInlining)
                              +                      :: Data.ProtoLens.FieldDescriptor OptimizerOptions
                              +                optLevel__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "opt_level"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor OptimizerOptions'Level)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional optLevel)
                              +                      :: Data.ProtoLens.FieldDescriptor OptimizerOptions
                              +                globalJitLevel__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "global_jit_level"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor OptimizerOptions'GlobalJitLevel)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional globalJitLevel)
                              +                      :: Data.ProtoLens.FieldDescriptor OptimizerOptions
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.OptimizerOptions")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1,
                              +                     doCommonSubexpressionElimination__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, doConstantFolding__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, doFunctionInlining__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, optLevel__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, globalJitLevel__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("do_common_subexpression_elimination",
                              +                     doCommonSubexpressionElimination__field_descriptor),
                              +                    ("do_constant_folding", doConstantFolding__field_descriptor),
                              +                    ("do_function_inlining", doFunctionInlining__field_descriptor),
                              +                    ("opt_level", optLevel__field_descriptor),
                              +                    ("global_jit_level", globalJitLevel__field_descriptor)])
                              +
                              +data OptimizerOptions'GlobalJitLevel = OptimizerOptions'OFF
                              +                                     | OptimizerOptions'DEFAULT
                              +                                     | OptimizerOptions'ON_1
                              +                                     | OptimizerOptions'ON_2
                              +                                     deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default OptimizerOptions'GlobalJitLevel
                              +         where
                              +        def = OptimizerOptions'OFF
                              +
                              +instance Data.ProtoLens.FieldDefault
                              +           OptimizerOptions'GlobalJitLevel
                              +         where
                              +        fieldDefault = OptimizerOptions'OFF
                              +
                              +instance Data.ProtoLens.MessageEnum OptimizerOptions'GlobalJitLevel
                              +         where
                              +        maybeToEnum (-1) = Prelude.Just OptimizerOptions'OFF
                              +        maybeToEnum 0 = Prelude.Just OptimizerOptions'DEFAULT
                              +        maybeToEnum 1 = Prelude.Just OptimizerOptions'ON_1
                              +        maybeToEnum 2 = Prelude.Just OptimizerOptions'ON_2
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum OptimizerOptions'OFF = "OFF"
                              +        showEnum OptimizerOptions'DEFAULT = "DEFAULT"
                              +        showEnum OptimizerOptions'ON_1 = "ON_1"
                              +        showEnum OptimizerOptions'ON_2 = "ON_2"
                              +        readEnum "OFF" = Prelude.Just OptimizerOptions'OFF
                              +        readEnum "DEFAULT" = Prelude.Just OptimizerOptions'DEFAULT
                              +        readEnum "ON_1" = Prelude.Just OptimizerOptions'ON_1
                              +        readEnum "ON_2" = Prelude.Just OptimizerOptions'ON_2
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum OptimizerOptions'GlobalJitLevel where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum GlobalJitLevel: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum OptimizerOptions'OFF = -1
                              +        fromEnum OptimizerOptions'DEFAULT = 0
                              +        fromEnum OptimizerOptions'ON_1 = 1
                              +        fromEnum OptimizerOptions'ON_2 = 2
                              +        succ OptimizerOptions'ON_2
                              +          = Prelude.error
                              +              "OptimizerOptions'GlobalJitLevel.succ: bad argument OptimizerOptions'ON_2. This value would be out of bounds."
                              +        succ OptimizerOptions'OFF = OptimizerOptions'DEFAULT
                              +        succ OptimizerOptions'DEFAULT = OptimizerOptions'ON_1
                              +        succ OptimizerOptions'ON_1 = OptimizerOptions'ON_2
                              +        pred OptimizerOptions'OFF
                              +          = Prelude.error
                              +              "OptimizerOptions'GlobalJitLevel.pred: bad argument OptimizerOptions'OFF. This value would be out of bounds."
                              +        pred OptimizerOptions'DEFAULT = OptimizerOptions'OFF
                              +        pred OptimizerOptions'ON_1 = OptimizerOptions'DEFAULT
                              +        pred OptimizerOptions'ON_2 = OptimizerOptions'ON_1
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded OptimizerOptions'GlobalJitLevel where
                              +        minBound = OptimizerOptions'OFF
                              +        maxBound = OptimizerOptions'ON_2
                              +
                              +data OptimizerOptions'Level = OptimizerOptions'L0
                              +                            | OptimizerOptions'L1
                              +                            deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default OptimizerOptions'Level where
                              +        def = OptimizerOptions'L0
                              +
                              +instance Data.ProtoLens.FieldDefault OptimizerOptions'Level where
                              +        fieldDefault = OptimizerOptions'L0
                              +
                              +instance Data.ProtoLens.MessageEnum OptimizerOptions'Level where
                              +        maybeToEnum (-1) = Prelude.Just OptimizerOptions'L0
                              +        maybeToEnum 0 = Prelude.Just OptimizerOptions'L1
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum OptimizerOptions'L0 = "L0"
                              +        showEnum OptimizerOptions'L1 = "L1"
                              +        readEnum "L0" = Prelude.Just OptimizerOptions'L0
                              +        readEnum "L1" = Prelude.Just OptimizerOptions'L1
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum OptimizerOptions'Level where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum Level: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum OptimizerOptions'L0 = -1
                              +        fromEnum OptimizerOptions'L1 = 0
                              +        succ OptimizerOptions'L1
                              +          = Prelude.error
                              +              "OptimizerOptions'Level.succ: bad argument OptimizerOptions'L1. This value would be out of bounds."
                              +        succ OptimizerOptions'L0 = OptimizerOptions'L1
                              +        pred OptimizerOptions'L0
                              +          = Prelude.error
                              +              "OptimizerOptions'Level.pred: bad argument OptimizerOptions'L0. This value would be out of bounds."
                              +        pred OptimizerOptions'L1 = OptimizerOptions'L0
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded OptimizerOptions'Level where
                              +        minBound = OptimizerOptions'L0
                              +        maxBound = OptimizerOptions'L1
                              +
                              +data RPCOptions = RPCOptions{_RPCOptions'useRpcForInprocessMaster
                              +                             :: !Prelude.Bool}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "useRpcForInprocessMaster" f RPCOptions
                              +           RPCOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RPCOptions'useRpcForInprocessMaster
                              +                 (\ x__ y__ -> x__{_RPCOptions'useRpcForInprocessMaster = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default RPCOptions where
                              +        def
                              +          = RPCOptions{_RPCOptions'useRpcForInprocessMaster =
                              +                         Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message RPCOptions where
                              +        descriptor
                              +          = let useRpcForInprocessMaster__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "use_rpc_for_inprocess_master"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         useRpcForInprocessMaster)
                              +                      :: Data.ProtoLens.FieldDescriptor RPCOptions
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.RPCOptions")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1,
                              +                     useRpcForInprocessMaster__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("use_rpc_for_inprocess_master",
                              +                     useRpcForInprocessMaster__field_descriptor)])
                              +
                              +data RunMetadata = RunMetadata{_RunMetadata'stepStats ::
                              +                               !(Prelude.Maybe
                              +                                   Proto.Tensorflow.Core.Framework.StepStats.StepStats),
                              +                               _RunMetadata'costGraph ::
                              +                               !(Prelude.Maybe
                              +                                   Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef),
                              +                               _RunMetadata'partitionGraphs ::
                              +                               ![Proto.Tensorflow.Core.Framework.Graph.GraphDef]}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.StepStats.StepStats,
                              +          b ~ Proto.Tensorflow.Core.Framework.StepStats.StepStats,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "stepStats" f RunMetadata RunMetadata a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunMetadata'stepStats
                              +                 (\ x__ y__ -> x__{_RunMetadata'stepStats = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.StepStats.StepStats,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.StepStats.StepStats,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'stepStats" f RunMetadata RunMetadata a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunMetadata'stepStats
                              +                 (\ x__ y__ -> x__{_RunMetadata'stepStats = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef,
                              +          b ~ Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "costGraph" f RunMetadata RunMetadata a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunMetadata'costGraph
                              +                 (\ x__ y__ -> x__{_RunMetadata'costGraph = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'costGraph" f RunMetadata RunMetadata a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunMetadata'costGraph
                              +                 (\ x__ y__ -> x__{_RunMetadata'costGraph = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Proto.Tensorflow.Core.Framework.Graph.GraphDef],
                              +          b ~ [Proto.Tensorflow.Core.Framework.Graph.GraphDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "partitionGraphs" f RunMetadata RunMetadata a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunMetadata'partitionGraphs
                              +                 (\ x__ y__ -> x__{_RunMetadata'partitionGraphs = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default RunMetadata where
                              +        def
                              +          = RunMetadata{_RunMetadata'stepStats = Prelude.Nothing,
                              +                        _RunMetadata'costGraph = Prelude.Nothing,
                              +                        _RunMetadata'partitionGraphs = []}
                              +
                              +instance Data.ProtoLens.Message RunMetadata where
                              +        descriptor
                              +          = let stepStats__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "step_stats"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.StepStats.StepStats)
                              +                      (Data.ProtoLens.OptionalField maybe'stepStats)
                              +                      :: Data.ProtoLens.FieldDescriptor RunMetadata
                              +                costGraph__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cost_graph"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef)
                              +                      (Data.ProtoLens.OptionalField maybe'costGraph)
                              +                      :: Data.ProtoLens.FieldDescriptor RunMetadata
                              +                partitionGraphs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "partition_graphs"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Graph.GraphDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         partitionGraphs)
                              +                      :: Data.ProtoLens.FieldDescriptor RunMetadata
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.RunMetadata")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, stepStats__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, costGraph__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, partitionGraphs__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("step_stats", stepStats__field_descriptor),
                              +                    ("cost_graph", costGraph__field_descriptor),
                              +                    ("partition_graphs", partitionGraphs__field_descriptor)])
                              +
                              +data RunOptions = RunOptions{_RunOptions'traceLevel ::
                              +                             !RunOptions'TraceLevel,
                              +                             _RunOptions'timeoutInMs :: !Data.Int.Int64,
                              +                             _RunOptions'interOpThreadPool :: !Data.Int.Int32,
                              +                             _RunOptions'outputPartitionGraphs :: !Prelude.Bool,
                              +                             _RunOptions'debugOptions ::
                              +                             !(Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Debug.DebugOptions)}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ RunOptions'TraceLevel, b ~ RunOptions'TraceLevel,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "traceLevel" f RunOptions RunOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunOptions'traceLevel
                              +                 (\ x__ y__ -> x__{_RunOptions'traceLevel = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "timeoutInMs" f RunOptions RunOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunOptions'timeoutInMs
                              +                 (\ x__ y__ -> x__{_RunOptions'timeoutInMs = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "interOpThreadPool" f RunOptions RunOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunOptions'interOpThreadPool
                              +                 (\ x__ y__ -> x__{_RunOptions'interOpThreadPool = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "outputPartitionGraphs" f RunOptions RunOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunOptions'outputPartitionGraphs
                              +                 (\ x__ y__ -> x__{_RunOptions'outputPartitionGraphs = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Protobuf.Debug.DebugOptions,
                              +          b ~ Proto.Tensorflow.Core.Protobuf.Debug.DebugOptions,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "debugOptions" f RunOptions RunOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunOptions'debugOptions
                              +                 (\ x__ y__ -> x__{_RunOptions'debugOptions = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Debug.DebugOptions,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Debug.DebugOptions,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'debugOptions" f RunOptions RunOptions a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunOptions'debugOptions
                              +                 (\ x__ y__ -> x__{_RunOptions'debugOptions = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default RunOptions where
                              +        def
                              +          = RunOptions{_RunOptions'traceLevel = Data.Default.Class.def,
                              +                       _RunOptions'timeoutInMs = Data.ProtoLens.fieldDefault,
                              +                       _RunOptions'interOpThreadPool = Data.ProtoLens.fieldDefault,
                              +                       _RunOptions'outputPartitionGraphs = Data.ProtoLens.fieldDefault,
                              +                       _RunOptions'debugOptions = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message RunOptions where
                              +        descriptor
                              +          = let traceLevel__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "trace_level"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor RunOptions'TraceLevel)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional traceLevel)
                              +                      :: Data.ProtoLens.FieldDescriptor RunOptions
                              +                timeoutInMs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "timeout_in_ms"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timeoutInMs)
                              +                      :: Data.ProtoLens.FieldDescriptor RunOptions
                              +                interOpThreadPool__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "inter_op_thread_pool"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         interOpThreadPool)
                              +                      :: Data.ProtoLens.FieldDescriptor RunOptions
                              +                outputPartitionGraphs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "output_partition_graphs"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         outputPartitionGraphs)
                              +                      :: Data.ProtoLens.FieldDescriptor RunOptions
                              +                debugOptions__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "debug_options"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Protobuf.Debug.DebugOptions)
                              +                      (Data.ProtoLens.OptionalField maybe'debugOptions)
                              +                      :: Data.ProtoLens.FieldDescriptor RunOptions
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.RunOptions")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, traceLevel__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, timeoutInMs__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, interOpThreadPool__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, outputPartitionGraphs__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, debugOptions__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("trace_level", traceLevel__field_descriptor),
                              +                    ("timeout_in_ms", timeoutInMs__field_descriptor),
                              +                    ("inter_op_thread_pool", interOpThreadPool__field_descriptor),
                              +                    ("output_partition_graphs",
                              +                     outputPartitionGraphs__field_descriptor),
                              +                    ("debug_options", debugOptions__field_descriptor)])
                              +
                              +data RunOptions'TraceLevel = RunOptions'NO_TRACE
                              +                           | RunOptions'SOFTWARE_TRACE
                              +                           | RunOptions'HARDWARE_TRACE
                              +                           | RunOptions'FULL_TRACE
                              +                           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default RunOptions'TraceLevel where
                              +        def = RunOptions'NO_TRACE
                              +
                              +instance Data.ProtoLens.FieldDefault RunOptions'TraceLevel where
                              +        fieldDefault = RunOptions'NO_TRACE
                              +
                              +instance Data.ProtoLens.MessageEnum RunOptions'TraceLevel where
                              +        maybeToEnum 0 = Prelude.Just RunOptions'NO_TRACE
                              +        maybeToEnum 1 = Prelude.Just RunOptions'SOFTWARE_TRACE
                              +        maybeToEnum 2 = Prelude.Just RunOptions'HARDWARE_TRACE
                              +        maybeToEnum 3 = Prelude.Just RunOptions'FULL_TRACE
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum RunOptions'NO_TRACE = "NO_TRACE"
                              +        showEnum RunOptions'SOFTWARE_TRACE = "SOFTWARE_TRACE"
                              +        showEnum RunOptions'HARDWARE_TRACE = "HARDWARE_TRACE"
                              +        showEnum RunOptions'FULL_TRACE = "FULL_TRACE"
                              +        readEnum "NO_TRACE" = Prelude.Just RunOptions'NO_TRACE
                              +        readEnum "SOFTWARE_TRACE" = Prelude.Just RunOptions'SOFTWARE_TRACE
                              +        readEnum "HARDWARE_TRACE" = Prelude.Just RunOptions'HARDWARE_TRACE
                              +        readEnum "FULL_TRACE" = Prelude.Just RunOptions'FULL_TRACE
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum RunOptions'TraceLevel where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum TraceLevel: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum RunOptions'NO_TRACE = 0
                              +        fromEnum RunOptions'SOFTWARE_TRACE = 1
                              +        fromEnum RunOptions'HARDWARE_TRACE = 2
                              +        fromEnum RunOptions'FULL_TRACE = 3
                              +        succ RunOptions'FULL_TRACE
                              +          = Prelude.error
                              +              "RunOptions'TraceLevel.succ: bad argument RunOptions'FULL_TRACE. This value would be out of bounds."
                              +        succ RunOptions'NO_TRACE = RunOptions'SOFTWARE_TRACE
                              +        succ RunOptions'SOFTWARE_TRACE = RunOptions'HARDWARE_TRACE
                              +        succ RunOptions'HARDWARE_TRACE = RunOptions'FULL_TRACE
                              +        pred RunOptions'NO_TRACE
                              +          = Prelude.error
                              +              "RunOptions'TraceLevel.pred: bad argument RunOptions'NO_TRACE. This value would be out of bounds."
                              +        pred RunOptions'SOFTWARE_TRACE = RunOptions'NO_TRACE
                              +        pred RunOptions'HARDWARE_TRACE = RunOptions'SOFTWARE_TRACE
                              +        pred RunOptions'FULL_TRACE = RunOptions'HARDWARE_TRACE
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded RunOptions'TraceLevel where
                              +        minBound = RunOptions'NO_TRACE
                              +        maxBound = RunOptions'FULL_TRACE
                              +
                              +data ThreadPoolOptionProto = ThreadPoolOptionProto{_ThreadPoolOptionProto'numThreads
                              +                                                   :: !Data.Int.Int32,
                              +                                                   _ThreadPoolOptionProto'globalName ::
                              +                                                   !Data.Text.Text}
                              +                           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "numThreads" f ThreadPoolOptionProto
                              +           ThreadPoolOptionProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ThreadPoolOptionProto'numThreads
                              +                 (\ x__ y__ -> x__{_ThreadPoolOptionProto'numThreads = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "globalName" f ThreadPoolOptionProto
                              +           ThreadPoolOptionProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ThreadPoolOptionProto'globalName
                              +                 (\ x__ y__ -> x__{_ThreadPoolOptionProto'globalName = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ThreadPoolOptionProto where
                              +        def
                              +          = ThreadPoolOptionProto{_ThreadPoolOptionProto'numThreads =
                              +                                    Data.ProtoLens.fieldDefault,
                              +                                  _ThreadPoolOptionProto'globalName = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message ThreadPoolOptionProto where
                              +        descriptor
                              +          = let numThreads__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "num_threads"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numThreads)
                              +                      :: Data.ProtoLens.FieldDescriptor ThreadPoolOptionProto
                              +                globalName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "global_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional globalName)
                              +                      :: Data.ProtoLens.FieldDescriptor ThreadPoolOptionProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ThreadPoolOptionProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, numThreads__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, globalName__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("num_threads", numThreads__field_descriptor),
                              +                    ("global_name", globalName__field_descriptor)])
                              +
                              +allocatorType ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "allocatorType" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +allocatorType
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allocatorType")
                              +
                              +allowGrowth ::
                              +            forall f s t a b . (Lens.Labels.HasLens "allowGrowth" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +allowGrowth
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allowGrowth")
                              +
                              +allowSoftPlacement ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "allowSoftPlacement" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +allowSoftPlacement
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "allowSoftPlacement")
                              +
                              +buildCostModel ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "buildCostModel" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +buildCostModel
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "buildCostModel")
                              +
                              +buildCostModelAfter ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "buildCostModelAfter" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +buildCostModelAfter
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "buildCostModelAfter")
                              +
                              +clusterDef ::
                              +           forall f s t a b . (Lens.Labels.HasLens "clusterDef" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +clusterDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "clusterDef")
                              +
                              +costGraph ::
                              +          forall f s t a b . (Lens.Labels.HasLens "costGraph" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +costGraph
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "costGraph")
                              +
                              +debugOptions ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "debugOptions" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +debugOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "debugOptions")
                              +
                              +deferredDeletionBytes ::
                              +                      forall f s t a b .
                              +                        (Lens.Labels.HasLens "deferredDeletionBytes" f s t a b) =>
                              +                        Lens.Family2.LensLike f s t a b
                              +deferredDeletionBytes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "deferredDeletionBytes")
                              +
                              +deviceCount ::
                              +            forall f s t a b . (Lens.Labels.HasLens "deviceCount" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +deviceCount
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "deviceCount")
                              +
                              +deviceFilters ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "deviceFilters" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +deviceFilters
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "deviceFilters")
                              +
                              +doCommonSubexpressionElimination ::
                              +                                 forall f s t a b .
                              +                                   (Lens.Labels.HasLens "doCommonSubexpressionElimination" f s t a
                              +                                      b) =>
                              +                                   Lens.Family2.LensLike f s t a b
                              +doCommonSubexpressionElimination
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "doCommonSubexpressionElimination")
                              +
                              +doConstantFolding ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "doConstantFolding" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +doConstantFolding
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "doConstantFolding")
                              +
                              +doFunctionInlining ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "doFunctionInlining" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +doFunctionInlining
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "doFunctionInlining")
                              +
                              +enableBfloat16Sendrecv ::
                              +                       forall f s t a b .
                              +                         (Lens.Labels.HasLens "enableBfloat16Sendrecv" f s t a b) =>
                              +                         Lens.Family2.LensLike f s t a b
                              +enableBfloat16Sendrecv
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "enableBfloat16Sendrecv")
                              +
                              +enableRecvScheduling ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "enableRecvScheduling" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +enableRecvScheduling
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "enableRecvScheduling")
                              +
                              +forceGpuCompatible ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "forceGpuCompatible" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +forceGpuCompatible
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "forceGpuCompatible")
                              +
                              +globalJitLevel ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "globalJitLevel" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +globalJitLevel
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "globalJitLevel")
                              +
                              +globalName ::
                              +           forall f s t a b . (Lens.Labels.HasLens "globalName" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +globalName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "globalName")
                              +
                              +gpuOptions ::
                              +           forall f s t a b . (Lens.Labels.HasLens "gpuOptions" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +gpuOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "gpuOptions")
                              +
                              +graphOptions ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "graphOptions" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +graphOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "graphOptions")
                              +
                              +inferShapes ::
                              +            forall f s t a b . (Lens.Labels.HasLens "inferShapes" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +inferShapes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "inferShapes")
                              +
                              +interOpParallelismThreads ::
                              +                          forall f s t a b .
                              +                            (Lens.Labels.HasLens "interOpParallelismThreads" f s t a b) =>
                              +                            Lens.Family2.LensLike f s t a b
                              +interOpParallelismThreads
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "interOpParallelismThreads")
                              +
                              +interOpThreadPool ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "interOpThreadPool" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +interOpThreadPool
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "interOpThreadPool")
                              +
                              +intraOpParallelismThreads ::
                              +                          forall f s t a b .
                              +                            (Lens.Labels.HasLens "intraOpParallelismThreads" f s t a b) =>
                              +                            Lens.Family2.LensLike f s t a b
                              +intraOpParallelismThreads
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "intraOpParallelismThreads")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +logDevicePlacement ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "logDevicePlacement" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +logDevicePlacement
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "logDevicePlacement")
                              +
                              +maybe'clusterDef ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'clusterDef" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'clusterDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'clusterDef")
                              +
                              +maybe'costGraph ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'costGraph" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'costGraph
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'costGraph")
                              +
                              +maybe'debugOptions ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "maybe'debugOptions" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +maybe'debugOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'debugOptions")
                              +
                              +maybe'gpuOptions ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'gpuOptions" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'gpuOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'gpuOptions")
                              +
                              +maybe'graphOptions ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "maybe'graphOptions" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +maybe'graphOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'graphOptions")
                              +
                              +maybe'optimizerOptions ::
                              +                       forall f s t a b .
                              +                         (Lens.Labels.HasLens "maybe'optimizerOptions" f s t a b) =>
                              +                         Lens.Family2.LensLike f s t a b
                              +maybe'optimizerOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'optimizerOptions")
                              +
                              +maybe'rewriteOptions ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "maybe'rewriteOptions" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +maybe'rewriteOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'rewriteOptions")
                              +
                              +maybe'rpcOptions ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'rpcOptions" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'rpcOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'rpcOptions")
                              +
                              +maybe'stepStats ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'stepStats" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'stepStats
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'stepStats")
                              +
                              +numThreads ::
                              +           forall f s t a b . (Lens.Labels.HasLens "numThreads" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +numThreads
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "numThreads")
                              +
                              +operationTimeoutInMs ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "operationTimeoutInMs" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +operationTimeoutInMs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "operationTimeoutInMs")
                              +
                              +optLevel ::
                              +         forall f s t a b . (Lens.Labels.HasLens "optLevel" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +optLevel
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "optLevel")
                              +
                              +optimizerOptions ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "optimizerOptions" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +optimizerOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "optimizerOptions")
                              +
                              +outputPartitionGraphs ::
                              +                      forall f s t a b .
                              +                        (Lens.Labels.HasLens "outputPartitionGraphs" f s t a b) =>
                              +                        Lens.Family2.LensLike f s t a b
                              +outputPartitionGraphs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "outputPartitionGraphs")
                              +
                              +partitionGraphs ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "partitionGraphs" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +partitionGraphs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "partitionGraphs")
                              +
                              +perProcessGpuMemoryFraction ::
                              +                            forall f s t a b .
                              +                              (Lens.Labels.HasLens "perProcessGpuMemoryFraction" f s t a b) =>
                              +                              Lens.Family2.LensLike f s t a b
                              +perProcessGpuMemoryFraction
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "perProcessGpuMemoryFraction")
                              +
                              +placePrunedGraph ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "placePrunedGraph" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +placePrunedGraph
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "placePrunedGraph")
                              +
                              +placementPeriod ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "placementPeriod" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +placementPeriod
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "placementPeriod")
                              +
                              +pollingActiveDelayUsecs ::
                              +                        forall f s t a b .
                              +                          (Lens.Labels.HasLens "pollingActiveDelayUsecs" f s t a b) =>
                              +                          Lens.Family2.LensLike f s t a b
                              +pollingActiveDelayUsecs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "pollingActiveDelayUsecs")
                              +
                              +pollingInactiveDelayMsecs ::
                              +                          forall f s t a b .
                              +                            (Lens.Labels.HasLens "pollingInactiveDelayMsecs" f s t a b) =>
                              +                            Lens.Family2.LensLike f s t a b
                              +pollingInactiveDelayMsecs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "pollingInactiveDelayMsecs")
                              +
                              +rewriteOptions ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "rewriteOptions" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +rewriteOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "rewriteOptions")
                              +
                              +rpcOptions ::
                              +           forall f s t a b . (Lens.Labels.HasLens "rpcOptions" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +rpcOptions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "rpcOptions")
                              +
                              +sessionInterOpThreadPool ::
                              +                         forall f s t a b .
                              +                           (Lens.Labels.HasLens "sessionInterOpThreadPool" f s t a b) =>
                              +                           Lens.Family2.LensLike f s t a b
                              +sessionInterOpThreadPool
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "sessionInterOpThreadPool")
                              +
                              +stepStats ::
                              +          forall f s t a b . (Lens.Labels.HasLens "stepStats" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +stepStats
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "stepStats")
                              +
                              +timelineStep ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "timelineStep" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +timelineStep
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "timelineStep")
                              +
                              +timeoutInMs ::
                              +            forall f s t a b . (Lens.Labels.HasLens "timeoutInMs" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +timeoutInMs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "timeoutInMs")
                              +
                              +traceLevel ::
                              +           forall f s t a b . (Lens.Labels.HasLens "traceLevel" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +traceLevel
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "traceLevel")
                              +
                              +usePerSessionThreads ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "usePerSessionThreads" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +usePerSessionThreads
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "usePerSessionThreads")
                              +
                              +useRpcForInprocessMaster ::
                              +                         forall f s t a b .
                              +                           (Lens.Labels.HasLens "useRpcForInprocessMaster" f s t a b) =>
                              +                           Lens.Family2.LensLike f s t a b
                              +useRpcForInprocessMaster
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "useRpcForInprocessMaster")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              +
                              +visibleDeviceList ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "visibleDeviceList" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +visibleDeviceList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "visibleDeviceList")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.ControlFlow.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.ControlFlow.html new file mode 100644 index 0000000..b0a4a6d --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.ControlFlow.html @@ -0,0 +1,651 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/control_flow.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.ControlFlow where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data CondContextDef = CondContextDef{_CondContextDef'contextName ::
                              +                                     !Data.Text.Text,
                              +                                     _CondContextDef'predName :: !Data.Text.Text,
                              +                                     _CondContextDef'pivotName :: !Data.Text.Text,
                              +                                     _CondContextDef'branch :: !Data.Int.Int32,
                              +                                     _CondContextDef'valuesDef :: !(Prelude.Maybe ValuesDef)}
                              +                    deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "contextName" f CondContextDef CondContextDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CondContextDef'contextName
                              +                 (\ x__ y__ -> x__{_CondContextDef'contextName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "predName" f CondContextDef CondContextDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CondContextDef'predName
                              +                 (\ x__ y__ -> x__{_CondContextDef'predName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "pivotName" f CondContextDef CondContextDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CondContextDef'pivotName
                              +                 (\ x__ y__ -> x__{_CondContextDef'pivotName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "branch" f CondContextDef CondContextDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CondContextDef'branch
                              +                 (\ x__ y__ -> x__{_CondContextDef'branch = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ ValuesDef, b ~ ValuesDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "valuesDef" f CondContextDef CondContextDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CondContextDef'valuesDef
                              +                 (\ x__ y__ -> x__{_CondContextDef'valuesDef = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe ValuesDef, b ~ Prelude.Maybe ValuesDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'valuesDef" f CondContextDef
                              +           CondContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CondContextDef'valuesDef
                              +                 (\ x__ y__ -> x__{_CondContextDef'valuesDef = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CondContextDef where
                              +        def
                              +          = CondContextDef{_CondContextDef'contextName =
                              +                             Data.ProtoLens.fieldDefault,
                              +                           _CondContextDef'predName = Data.ProtoLens.fieldDefault,
                              +                           _CondContextDef'pivotName = Data.ProtoLens.fieldDefault,
                              +                           _CondContextDef'branch = Data.ProtoLens.fieldDefault,
                              +                           _CondContextDef'valuesDef = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message CondContextDef where
                              +        descriptor
                              +          = let contextName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "context_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional contextName)
                              +                      :: Data.ProtoLens.FieldDescriptor CondContextDef
                              +                predName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "pred_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional predName)
                              +                      :: Data.ProtoLens.FieldDescriptor CondContextDef
                              +                pivotName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "pivot_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional pivotName)
                              +                      :: Data.ProtoLens.FieldDescriptor CondContextDef
                              +                branch__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "branch"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional branch)
                              +                      :: Data.ProtoLens.FieldDescriptor CondContextDef
                              +                valuesDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "values_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor ValuesDef)
                              +                      (Data.ProtoLens.OptionalField maybe'valuesDef)
                              +                      :: Data.ProtoLens.FieldDescriptor CondContextDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CondContextDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, contextName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, predName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, pivotName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, branch__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, valuesDef__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("context_name", contextName__field_descriptor),
                              +                    ("pred_name", predName__field_descriptor),
                              +                    ("pivot_name", pivotName__field_descriptor),
                              +                    ("branch", branch__field_descriptor),
                              +                    ("values_def", valuesDef__field_descriptor)])
                              +
                              +data ValuesDef = ValuesDef{_ValuesDef'values :: ![Data.Text.Text],
                              +                           _ValuesDef'externalValues ::
                              +                           !(Data.Map.Map Data.Text.Text Data.Text.Text)}
                              +               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "values" f ValuesDef ValuesDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ValuesDef'values
                              +                 (\ x__ y__ -> x__{_ValuesDef'values = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text Data.Text.Text,
                              +          b ~ Data.Map.Map Data.Text.Text Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "externalValues" f ValuesDef ValuesDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ValuesDef'externalValues
                              +                 (\ x__ y__ -> x__{_ValuesDef'externalValues = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ValuesDef where
                              +        def
                              +          = ValuesDef{_ValuesDef'values = [],
                              +                      _ValuesDef'externalValues = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message ValuesDef where
                              +        descriptor
                              +          = let values__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "values"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked values)
                              +                      :: Data.ProtoLens.FieldDescriptor ValuesDef
                              +                externalValues__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "external_values"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor ValuesDef'ExternalValuesEntry)
                              +                      (Data.ProtoLens.MapField key value externalValues)
                              +                      :: Data.ProtoLens.FieldDescriptor ValuesDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ValuesDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, values__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, externalValues__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("values", values__field_descriptor),
                              +                    ("external_values", externalValues__field_descriptor)])
                              +
                              +data ValuesDef'ExternalValuesEntry = ValuesDef'ExternalValuesEntry{_ValuesDef'ExternalValuesEntry'key
                              +                                                                   :: !Data.Text.Text,
                              +                                                                   _ValuesDef'ExternalValuesEntry'value
                              +                                                                   :: !Data.Text.Text}
                              +                                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f ValuesDef'ExternalValuesEntry
                              +           ValuesDef'ExternalValuesEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ValuesDef'ExternalValuesEntry'key
                              +                 (\ x__ y__ -> x__{_ValuesDef'ExternalValuesEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f ValuesDef'ExternalValuesEntry
                              +           ValuesDef'ExternalValuesEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ValuesDef'ExternalValuesEntry'value
                              +                 (\ x__ y__ -> x__{_ValuesDef'ExternalValuesEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ValuesDef'ExternalValuesEntry
                              +         where
                              +        def
                              +          = ValuesDef'ExternalValuesEntry{_ValuesDef'ExternalValuesEntry'key
                              +                                            = Data.ProtoLens.fieldDefault,
                              +                                          _ValuesDef'ExternalValuesEntry'value =
                              +                                            Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message ValuesDef'ExternalValuesEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor ValuesDef'ExternalValuesEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
                              +                      :: Data.ProtoLens.FieldDescriptor ValuesDef'ExternalValuesEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ValuesDef.ExternalValuesEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data WhileContextDef = WhileContextDef{_WhileContextDef'contextName
                              +                                       :: !Data.Text.Text,
                              +                                       _WhileContextDef'parallelIterations :: !Data.Int.Int32,
                              +                                       _WhileContextDef'backProp :: !Prelude.Bool,
                              +                                       _WhileContextDef'swapMemory :: !Prelude.Bool,
                              +                                       _WhileContextDef'pivotName :: !Data.Text.Text,
                              +                                       _WhileContextDef'pivotForPredName :: !Data.Text.Text,
                              +                                       _WhileContextDef'pivotForBodyName :: !Data.Text.Text,
                              +                                       _WhileContextDef'loopExitNames :: ![Data.Text.Text],
                              +                                       _WhileContextDef'loopEnterNames :: ![Data.Text.Text],
                              +                                       _WhileContextDef'valuesDef :: !(Prelude.Maybe ValuesDef)}
                              +                     deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "contextName" f WhileContextDef WhileContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'contextName
                              +                 (\ x__ y__ -> x__{_WhileContextDef'contextName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "parallelIterations" f WhileContextDef
                              +           WhileContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'parallelIterations
                              +                 (\ x__ y__ -> x__{_WhileContextDef'parallelIterations = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "backProp" f WhileContextDef WhileContextDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'backProp
                              +                 (\ x__ y__ -> x__{_WhileContextDef'backProp = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "swapMemory" f WhileContextDef WhileContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'swapMemory
                              +                 (\ x__ y__ -> x__{_WhileContextDef'swapMemory = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "pivotName" f WhileContextDef WhileContextDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'pivotName
                              +                 (\ x__ y__ -> x__{_WhileContextDef'pivotName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "pivotForPredName" f WhileContextDef
                              +           WhileContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'pivotForPredName
                              +                 (\ x__ y__ -> x__{_WhileContextDef'pivotForPredName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "pivotForBodyName" f WhileContextDef
                              +           WhileContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'pivotForBodyName
                              +                 (\ x__ y__ -> x__{_WhileContextDef'pivotForBodyName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "loopExitNames" f WhileContextDef
                              +           WhileContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'loopExitNames
                              +                 (\ x__ y__ -> x__{_WhileContextDef'loopExitNames = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "loopEnterNames" f WhileContextDef
                              +           WhileContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'loopEnterNames
                              +                 (\ x__ y__ -> x__{_WhileContextDef'loopEnterNames = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ ValuesDef, b ~ ValuesDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "valuesDef" f WhileContextDef WhileContextDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'valuesDef
                              +                 (\ x__ y__ -> x__{_WhileContextDef'valuesDef = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe ValuesDef, b ~ Prelude.Maybe ValuesDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'valuesDef" f WhileContextDef
                              +           WhileContextDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _WhileContextDef'valuesDef
                              +                 (\ x__ y__ -> x__{_WhileContextDef'valuesDef = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default WhileContextDef where
                              +        def
                              +          = WhileContextDef{_WhileContextDef'contextName =
                              +                              Data.ProtoLens.fieldDefault,
                              +                            _WhileContextDef'parallelIterations = Data.ProtoLens.fieldDefault,
                              +                            _WhileContextDef'backProp = Data.ProtoLens.fieldDefault,
                              +                            _WhileContextDef'swapMemory = Data.ProtoLens.fieldDefault,
                              +                            _WhileContextDef'pivotName = Data.ProtoLens.fieldDefault,
                              +                            _WhileContextDef'pivotForPredName = Data.ProtoLens.fieldDefault,
                              +                            _WhileContextDef'pivotForBodyName = Data.ProtoLens.fieldDefault,
                              +                            _WhileContextDef'loopExitNames = [],
                              +                            _WhileContextDef'loopEnterNames = [],
                              +                            _WhileContextDef'valuesDef = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message WhileContextDef where
                              +        descriptor
                              +          = let contextName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "context_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional contextName)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                parallelIterations__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "parallel_iterations"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         parallelIterations)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                backProp__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "back_prop"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional backProp)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                swapMemory__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "swap_memory"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional swapMemory)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                pivotName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "pivot_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional pivotName)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                pivotForPredName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "pivot_for_pred_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         pivotForPredName)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                pivotForBodyName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "pivot_for_body_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         pivotForBodyName)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                loopExitNames__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "loop_exit_names"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         loopExitNames)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                loopEnterNames__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "loop_enter_names"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         loopEnterNames)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +                valuesDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "values_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor ValuesDef)
                              +                      (Data.ProtoLens.OptionalField maybe'valuesDef)
                              +                      :: Data.ProtoLens.FieldDescriptor WhileContextDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.WhileContextDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, contextName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, parallelIterations__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, backProp__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, swapMemory__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, pivotName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, pivotForPredName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, pivotForBodyName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, loopExitNames__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, loopEnterNames__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, valuesDef__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("context_name", contextName__field_descriptor),
                              +                    ("parallel_iterations", parallelIterations__field_descriptor),
                              +                    ("back_prop", backProp__field_descriptor),
                              +                    ("swap_memory", swapMemory__field_descriptor),
                              +                    ("pivot_name", pivotName__field_descriptor),
                              +                    ("pivot_for_pred_name", pivotForPredName__field_descriptor),
                              +                    ("pivot_for_body_name", pivotForBodyName__field_descriptor),
                              +                    ("loop_exit_names", loopExitNames__field_descriptor),
                              +                    ("loop_enter_names", loopEnterNames__field_descriptor),
                              +                    ("values_def", valuesDef__field_descriptor)])
                              +
                              +backProp ::
                              +         forall f s t a b . (Lens.Labels.HasLens "backProp" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +backProp
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "backProp")
                              +
                              +branch ::
                              +       forall f s t a b . (Lens.Labels.HasLens "branch" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +branch
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "branch")
                              +
                              +contextName ::
                              +            forall f s t a b . (Lens.Labels.HasLens "contextName" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +contextName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "contextName")
                              +
                              +externalValues ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "externalValues" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +externalValues
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "externalValues")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +loopEnterNames ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "loopEnterNames" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +loopEnterNames
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "loopEnterNames")
                              +
                              +loopExitNames ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "loopExitNames" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +loopExitNames
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "loopExitNames")
                              +
                              +maybe'valuesDef ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'valuesDef" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'valuesDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'valuesDef")
                              +
                              +parallelIterations ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "parallelIterations" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +parallelIterations
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "parallelIterations")
                              +
                              +pivotForBodyName ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "pivotForBodyName" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +pivotForBodyName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "pivotForBodyName")
                              +
                              +pivotForPredName ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "pivotForPredName" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +pivotForPredName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "pivotForPredName")
                              +
                              +pivotName ::
                              +          forall f s t a b . (Lens.Labels.HasLens "pivotName" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +pivotName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "pivotName")
                              +
                              +predName ::
                              +         forall f s t a b . (Lens.Labels.HasLens "predName" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +predName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "predName")
                              +
                              +swapMemory ::
                              +           forall f s t a b . (Lens.Labels.HasLens "swapMemory" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +swapMemory
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "swapMemory")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              +
                              +values ::
                              +       forall f s t a b . (Lens.Labels.HasLens "values" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +values
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "values")
                              +
                              +valuesDef ::
                              +          forall f s t a b . (Lens.Labels.HasLens "valuesDef" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +valuesDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "valuesDef")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Debug.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Debug.html new file mode 100644 index 0000000..ecf4e95 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Debug.html @@ -0,0 +1,273 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/debug.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.Debug where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data DebugOptions = DebugOptions{_DebugOptions'debugTensorWatchOpts
                              +                                 :: ![DebugTensorWatch],
                              +                                 _DebugOptions'globalStep :: !Data.Int.Int64}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [DebugTensorWatch], b ~ [DebugTensorWatch],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "debugTensorWatchOpts" f DebugOptions
                              +           DebugOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DebugOptions'debugTensorWatchOpts
                              +                 (\ x__ y__ -> x__{_DebugOptions'debugTensorWatchOpts = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "globalStep" f DebugOptions DebugOptions a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DebugOptions'globalStep
                              +                 (\ x__ y__ -> x__{_DebugOptions'globalStep = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default DebugOptions where
                              +        def
                              +          = DebugOptions{_DebugOptions'debugTensorWatchOpts = [],
                              +                         _DebugOptions'globalStep = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message DebugOptions where
                              +        descriptor
                              +          = let debugTensorWatchOpts__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "debug_tensor_watch_opts"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor DebugTensorWatch)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         debugTensorWatchOpts)
                              +                      :: Data.ProtoLens.FieldDescriptor DebugOptions
                              +                globalStep__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "global_step"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional globalStep)
                              +                      :: Data.ProtoLens.FieldDescriptor DebugOptions
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.DebugOptions")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 4, debugTensorWatchOpts__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, globalStep__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("debug_tensor_watch_opts",
                              +                     debugTensorWatchOpts__field_descriptor),
                              +                    ("global_step", globalStep__field_descriptor)])
                              +
                              +data DebugTensorWatch = DebugTensorWatch{_DebugTensorWatch'nodeName
                              +                                         :: !Data.Text.Text,
                              +                                         _DebugTensorWatch'outputSlot :: !Data.Int.Int32,
                              +                                         _DebugTensorWatch'debugOps :: ![Data.Text.Text],
                              +                                         _DebugTensorWatch'debugUrls :: ![Data.Text.Text],
                              +                                         _DebugTensorWatch'tolerateDebugOpCreationFailures ::
                              +                                         !Prelude.Bool}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "nodeName" f DebugTensorWatch DebugTensorWatch
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DebugTensorWatch'nodeName
                              +                 (\ x__ y__ -> x__{_DebugTensorWatch'nodeName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "outputSlot" f DebugTensorWatch
                              +           DebugTensorWatch
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DebugTensorWatch'outputSlot
                              +                 (\ x__ y__ -> x__{_DebugTensorWatch'outputSlot = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "debugOps" f DebugTensorWatch DebugTensorWatch
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DebugTensorWatch'debugOps
                              +                 (\ x__ y__ -> x__{_DebugTensorWatch'debugOps = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "debugUrls" f DebugTensorWatch DebugTensorWatch
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _DebugTensorWatch'debugUrls
                              +                 (\ x__ y__ -> x__{_DebugTensorWatch'debugUrls = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tolerateDebugOpCreationFailures" f
                              +           DebugTensorWatch
                              +           DebugTensorWatch
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _DebugTensorWatch'tolerateDebugOpCreationFailures
                              +                 (\ x__ y__ ->
                              +                    x__{_DebugTensorWatch'tolerateDebugOpCreationFailures = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default DebugTensorWatch where
                              +        def
                              +          = DebugTensorWatch{_DebugTensorWatch'nodeName =
                              +                               Data.ProtoLens.fieldDefault,
                              +                             _DebugTensorWatch'outputSlot = Data.ProtoLens.fieldDefault,
                              +                             _DebugTensorWatch'debugOps = [], _DebugTensorWatch'debugUrls = [],
                              +                             _DebugTensorWatch'tolerateDebugOpCreationFailures =
                              +                               Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message DebugTensorWatch where
                              +        descriptor
                              +          = let nodeName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "node_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional nodeName)
                              +                      :: Data.ProtoLens.FieldDescriptor DebugTensorWatch
                              +                outputSlot__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "output_slot"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional outputSlot)
                              +                      :: Data.ProtoLens.FieldDescriptor DebugTensorWatch
                              +                debugOps__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "debug_ops"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked debugOps)
                              +                      :: Data.ProtoLens.FieldDescriptor DebugTensorWatch
                              +                debugUrls__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "debug_urls"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked debugUrls)
                              +                      :: Data.ProtoLens.FieldDescriptor DebugTensorWatch
                              +                tolerateDebugOpCreationFailures__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor
                              +                      "tolerate_debug_op_creation_failures"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         tolerateDebugOpCreationFailures)
                              +                      :: Data.ProtoLens.FieldDescriptor DebugTensorWatch
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.DebugTensorWatch")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, nodeName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, outputSlot__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, debugOps__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, debugUrls__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5,
                              +                     tolerateDebugOpCreationFailures__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("node_name", nodeName__field_descriptor),
                              +                    ("output_slot", outputSlot__field_descriptor),
                              +                    ("debug_ops", debugOps__field_descriptor),
                              +                    ("debug_urls", debugUrls__field_descriptor),
                              +                    ("tolerate_debug_op_creation_failures",
                              +                     tolerateDebugOpCreationFailures__field_descriptor)])
                              +
                              +debugOps ::
                              +         forall f s t a b . (Lens.Labels.HasLens "debugOps" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +debugOps
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "debugOps")
                              +
                              +debugTensorWatchOpts ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "debugTensorWatchOpts" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +debugTensorWatchOpts
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "debugTensorWatchOpts")
                              +
                              +debugUrls ::
                              +          forall f s t a b . (Lens.Labels.HasLens "debugUrls" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +debugUrls
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "debugUrls")
                              +
                              +globalStep ::
                              +           forall f s t a b . (Lens.Labels.HasLens "globalStep" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +globalStep
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "globalStep")
                              +
                              +nodeName ::
                              +         forall f s t a b . (Lens.Labels.HasLens "nodeName" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +nodeName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "nodeName")
                              +
                              +outputSlot ::
                              +           forall f s t a b . (Lens.Labels.HasLens "outputSlot" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +outputSlot
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "outputSlot")
                              +
                              +tolerateDebugOpCreationFailures ::
                              +                                forall f s t a b .
                              +                                  (Lens.Labels.HasLens "tolerateDebugOpCreationFailures" f s t a
                              +                                     b) =>
                              +                                  Lens.Family2.LensLike f s t a b
                              +tolerateDebugOpCreationFailures
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "tolerateDebugOpCreationFailures")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.MetaGraph.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.MetaGraph.html new file mode 100644 index 0000000..e012a88 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.MetaGraph.html @@ -0,0 +1,1918 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/meta_graph.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.MetaGraph where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Google.Protobuf.Any
                              +import qualified Proto.Tensorflow.Core.Framework.Graph
                              +import qualified Proto.Tensorflow.Core.Framework.OpDef
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +import qualified Proto.Tensorflow.Core.Protobuf.Saver
                              +
                              +data AssetFileDef = AssetFileDef{_AssetFileDef'tensorInfo ::
                              +                                 !(Prelude.Maybe TensorInfo),
                              +                                 _AssetFileDef'filename :: !Data.Text.Text}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ TensorInfo, b ~ TensorInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensorInfo" f AssetFileDef AssetFileDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AssetFileDef'tensorInfo
                              +                 (\ x__ y__ -> x__{_AssetFileDef'tensorInfo = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe TensorInfo,
                              +          b ~ Prelude.Maybe TensorInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensorInfo" f AssetFileDef AssetFileDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AssetFileDef'tensorInfo
                              +                 (\ x__ y__ -> x__{_AssetFileDef'tensorInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "filename" f AssetFileDef AssetFileDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AssetFileDef'filename
                              +                 (\ x__ y__ -> x__{_AssetFileDef'filename = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default AssetFileDef where
                              +        def
                              +          = AssetFileDef{_AssetFileDef'tensorInfo = Prelude.Nothing,
                              +                         _AssetFileDef'filename = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message AssetFileDef where
                              +        descriptor
                              +          = let tensorInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor TensorInfo)
                              +                      (Data.ProtoLens.OptionalField maybe'tensorInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor AssetFileDef
                              +                filename__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "filename"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional filename)
                              +                      :: Data.ProtoLens.FieldDescriptor AssetFileDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.AssetFileDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, tensorInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, filename__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("tensor_info", tensorInfo__field_descriptor),
                              +                    ("filename", filename__field_descriptor)])
                              +
                              +data CollectionDef = CollectionDef{_CollectionDef'kind ::
                              +                                   !(Prelude.Maybe CollectionDef'Kind)}
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data CollectionDef'Kind = CollectionDef'NodeList' !CollectionDef'NodeList
                              +                        | CollectionDef'BytesList' !CollectionDef'BytesList
                              +                        | CollectionDef'Int64List' !CollectionDef'Int64List
                              +                        | CollectionDef'FloatList' !CollectionDef'FloatList
                              +                        | CollectionDef'AnyList' !CollectionDef'AnyList
                              +                        deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Maybe CollectionDef'Kind,
                              +          b ~ Prelude.Maybe CollectionDef'Kind, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'kind" f CollectionDef CollectionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe CollectionDef'NodeList,
                              +          b ~ Prelude.Maybe CollectionDef'NodeList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'nodeList" f CollectionDef CollectionDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (CollectionDef'NodeList' x__val) -> Prelude.Just
                              +                                                                           x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap CollectionDef'NodeList' y__))
                              +
                              +instance (a ~ CollectionDef'NodeList, b ~ CollectionDef'NodeList,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "nodeList" f CollectionDef CollectionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (CollectionDef'NodeList' x__val) -> Prelude.Just
                              +                                                                              x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap CollectionDef'NodeList' y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe CollectionDef'BytesList,
                              +          b ~ Prelude.Maybe CollectionDef'BytesList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'bytesList" f CollectionDef CollectionDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (CollectionDef'BytesList' x__val) -> Prelude.Just
                              +                                                                            x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap CollectionDef'BytesList' y__))
                              +
                              +instance (a ~ CollectionDef'BytesList, b ~ CollectionDef'BytesList,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "bytesList" f CollectionDef CollectionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (CollectionDef'BytesList' x__val) -> Prelude.Just
                              +                                                                               x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap CollectionDef'BytesList' y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe CollectionDef'Int64List,
                              +          b ~ Prelude.Maybe CollectionDef'Int64List, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'int64List" f CollectionDef CollectionDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (CollectionDef'Int64List' x__val) -> Prelude.Just
                              +                                                                            x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap CollectionDef'Int64List' y__))
                              +
                              +instance (a ~ CollectionDef'Int64List, b ~ CollectionDef'Int64List,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "int64List" f CollectionDef CollectionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (CollectionDef'Int64List' x__val) -> Prelude.Just
                              +                                                                               x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap CollectionDef'Int64List' y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe CollectionDef'FloatList,
                              +          b ~ Prelude.Maybe CollectionDef'FloatList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'floatList" f CollectionDef CollectionDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (CollectionDef'FloatList' x__val) -> Prelude.Just
                              +                                                                            x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap CollectionDef'FloatList' y__))
                              +
                              +instance (a ~ CollectionDef'FloatList, b ~ CollectionDef'FloatList,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "floatList" f CollectionDef CollectionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (CollectionDef'FloatList' x__val) -> Prelude.Just
                              +                                                                               x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap CollectionDef'FloatList' y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe CollectionDef'AnyList,
                              +          b ~ Prelude.Maybe CollectionDef'AnyList, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'anyList" f CollectionDef CollectionDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (CollectionDef'AnyList' x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap CollectionDef'AnyList' y__))
                              +
                              +instance (a ~ CollectionDef'AnyList, b ~ CollectionDef'AnyList,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "anyList" f CollectionDef CollectionDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'kind
                              +                 (\ x__ y__ -> x__{_CollectionDef'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (CollectionDef'AnyList' x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap CollectionDef'AnyList' y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance Data.Default.Class.Default CollectionDef where
                              +        def = CollectionDef{_CollectionDef'kind = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message CollectionDef where
                              +        descriptor
                              +          = let nodeList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "node_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CollectionDef'NodeList)
                              +                      (Data.ProtoLens.OptionalField maybe'nodeList)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef
                              +                bytesList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bytes_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CollectionDef'BytesList)
                              +                      (Data.ProtoLens.OptionalField maybe'bytesList)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef
                              +                int64List__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "int64_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CollectionDef'Int64List)
                              +                      (Data.ProtoLens.OptionalField maybe'int64List)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef
                              +                floatList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "float_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CollectionDef'FloatList)
                              +                      (Data.ProtoLens.OptionalField maybe'floatList)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef
                              +                anyList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "any_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CollectionDef'AnyList)
                              +                      (Data.ProtoLens.OptionalField maybe'anyList)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CollectionDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, nodeList__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, bytesList__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, int64List__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, floatList__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, anyList__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("node_list", nodeList__field_descriptor),
                              +                    ("bytes_list", bytesList__field_descriptor),
                              +                    ("int64_list", int64List__field_descriptor),
                              +                    ("float_list", floatList__field_descriptor),
                              +                    ("any_list", anyList__field_descriptor)])
                              +
                              +data CollectionDef'AnyList = CollectionDef'AnyList{_CollectionDef'AnyList'value
                              +                                                   :: ![Proto.Google.Protobuf.Any.Any]}
                              +                           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Proto.Google.Protobuf.Any.Any],
                              +          b ~ [Proto.Google.Protobuf.Any.Any], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f CollectionDef'AnyList
                              +           CollectionDef'AnyList
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'AnyList'value
                              +                 (\ x__ y__ -> x__{_CollectionDef'AnyList'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CollectionDef'AnyList where
                              +        def = CollectionDef'AnyList{_CollectionDef'AnyList'value = []}
                              +
                              +instance Data.ProtoLens.Message CollectionDef'AnyList where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Proto.Google.Protobuf.Any.Any)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked value)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef'AnyList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CollectionDef.AnyList")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +data CollectionDef'BytesList = CollectionDef'BytesList{_CollectionDef'BytesList'value
                              +                                                       :: ![Data.ByteString.ByteString]}
                              +                             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Data.ByteString.ByteString],
                              +          b ~ [Data.ByteString.ByteString], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f CollectionDef'BytesList
                              +           CollectionDef'BytesList
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'BytesList'value
                              +                 (\ x__ y__ -> x__{_CollectionDef'BytesList'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CollectionDef'BytesList where
                              +        def = CollectionDef'BytesList{_CollectionDef'BytesList'value = []}
                              +
                              +instance Data.ProtoLens.Message CollectionDef'BytesList where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked value)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef'BytesList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CollectionDef.BytesList")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +data CollectionDef'FloatList = CollectionDef'FloatList{_CollectionDef'FloatList'value
                              +                                                       :: ![Prelude.Float]}
                              +                             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Prelude.Float], b ~ [Prelude.Float],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f CollectionDef'FloatList
                              +           CollectionDef'FloatList
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'FloatList'value
                              +                 (\ x__ y__ -> x__{_CollectionDef'FloatList'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CollectionDef'FloatList where
                              +        def = CollectionDef'FloatList{_CollectionDef'FloatList'value = []}
                              +
                              +instance Data.ProtoLens.Message CollectionDef'FloatList where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed value)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef'FloatList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CollectionDef.FloatList")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +data CollectionDef'Int64List = CollectionDef'Int64List{_CollectionDef'Int64List'value
                              +                                                       :: ![Data.Int.Int64]}
                              +                             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Data.Int.Int64], b ~ [Data.Int.Int64],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f CollectionDef'Int64List
                              +           CollectionDef'Int64List
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'Int64List'value
                              +                 (\ x__ y__ -> x__{_CollectionDef'Int64List'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CollectionDef'Int64List where
                              +        def = CollectionDef'Int64List{_CollectionDef'Int64List'value = []}
                              +
                              +instance Data.ProtoLens.Message CollectionDef'Int64List where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed value)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef'Int64List
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CollectionDef.Int64List")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +data CollectionDef'NodeList = CollectionDef'NodeList{_CollectionDef'NodeList'value
                              +                                                     :: ![Data.Text.Text]}
                              +                            deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f CollectionDef'NodeList
                              +           CollectionDef'NodeList
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CollectionDef'NodeList'value
                              +                 (\ x__ y__ -> x__{_CollectionDef'NodeList'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CollectionDef'NodeList where
                              +        def = CollectionDef'NodeList{_CollectionDef'NodeList'value = []}
                              +
                              +instance Data.ProtoLens.Message CollectionDef'NodeList where
                              +        descriptor
                              +          = let value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked value)
                              +                      :: Data.ProtoLens.FieldDescriptor CollectionDef'NodeList
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CollectionDef.NodeList")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, value__field_descriptor)])
                              +                (Data.Map.fromList [("value", value__field_descriptor)])
                              +
                              +data MetaGraphDef = MetaGraphDef{_MetaGraphDef'metaInfoDef ::
                              +                                 !(Prelude.Maybe MetaGraphDef'MetaInfoDef),
                              +                                 _MetaGraphDef'graphDef ::
                              +                                 !(Prelude.Maybe Proto.Tensorflow.Core.Framework.Graph.GraphDef),
                              +                                 _MetaGraphDef'saverDef ::
                              +                                 !(Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Saver.SaverDef),
                              +                                 _MetaGraphDef'collectionDef ::
                              +                                 !(Data.Map.Map Data.Text.Text CollectionDef),
                              +                                 _MetaGraphDef'signatureDef ::
                              +                                 !(Data.Map.Map Data.Text.Text SignatureDef),
                              +                                 _MetaGraphDef'assetFileDef :: ![AssetFileDef]}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ MetaGraphDef'MetaInfoDef,
                              +          b ~ MetaGraphDef'MetaInfoDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "metaInfoDef" f MetaGraphDef MetaGraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'metaInfoDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'metaInfoDef = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe MetaGraphDef'MetaInfoDef,
                              +          b ~ Prelude.Maybe MetaGraphDef'MetaInfoDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'metaInfoDef" f MetaGraphDef MetaGraphDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'metaInfoDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'metaInfoDef = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Graph.GraphDef,
                              +          b ~ Proto.Tensorflow.Core.Framework.Graph.GraphDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "graphDef" f MetaGraphDef MetaGraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'graphDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'graphDef = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Graph.GraphDef,
                              +          b ~ Prelude.Maybe Proto.Tensorflow.Core.Framework.Graph.GraphDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'graphDef" f MetaGraphDef MetaGraphDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'graphDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'graphDef = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Protobuf.Saver.SaverDef,
                              +          b ~ Proto.Tensorflow.Core.Protobuf.Saver.SaverDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "saverDef" f MetaGraphDef MetaGraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'saverDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'saverDef = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Saver.SaverDef,
                              +          b ~ Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Saver.SaverDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'saverDef" f MetaGraphDef MetaGraphDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'saverDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'saverDef = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text CollectionDef,
                              +          b ~ Data.Map.Map Data.Text.Text CollectionDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "collectionDef" f MetaGraphDef MetaGraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'collectionDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'collectionDef = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text SignatureDef,
                              +          b ~ Data.Map.Map Data.Text.Text SignatureDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "signatureDef" f MetaGraphDef MetaGraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'signatureDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'signatureDef = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [AssetFileDef], b ~ [AssetFileDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "assetFileDef" f MetaGraphDef MetaGraphDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'assetFileDef
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'assetFileDef = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MetaGraphDef where
                              +        def
                              +          = MetaGraphDef{_MetaGraphDef'metaInfoDef = Prelude.Nothing,
                              +                         _MetaGraphDef'graphDef = Prelude.Nothing,
                              +                         _MetaGraphDef'saverDef = Prelude.Nothing,
                              +                         _MetaGraphDef'collectionDef = Data.Map.empty,
                              +                         _MetaGraphDef'signatureDef = Data.Map.empty,
                              +                         _MetaGraphDef'assetFileDef = []}
                              +
                              +instance Data.ProtoLens.Message MetaGraphDef where
                              +        descriptor
                              +          = let metaInfoDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "meta_info_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor MetaGraphDef'MetaInfoDef)
                              +                      (Data.ProtoLens.OptionalField maybe'metaInfoDef)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef
                              +                graphDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "graph_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Graph.GraphDef)
                              +                      (Data.ProtoLens.OptionalField maybe'graphDef)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef
                              +                saverDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "saver_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Protobuf.Saver.SaverDef)
                              +                      (Data.ProtoLens.OptionalField maybe'saverDef)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef
                              +                collectionDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "collection_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor MetaGraphDef'CollectionDefEntry)
                              +                      (Data.ProtoLens.MapField key value collectionDef)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef
                              +                signatureDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "signature_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor MetaGraphDef'SignatureDefEntry)
                              +                      (Data.ProtoLens.MapField key value signatureDef)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef
                              +                assetFileDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "asset_file_def"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor AssetFileDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked assetFileDef)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MetaGraphDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, metaInfoDef__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, graphDef__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, saverDef__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, collectionDef__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, signatureDef__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, assetFileDef__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("meta_info_def", metaInfoDef__field_descriptor),
                              +                    ("graph_def", graphDef__field_descriptor),
                              +                    ("saver_def", saverDef__field_descriptor),
                              +                    ("collection_def", collectionDef__field_descriptor),
                              +                    ("signature_def", signatureDef__field_descriptor),
                              +                    ("asset_file_def", assetFileDef__field_descriptor)])
                              +
                              +data MetaGraphDef'CollectionDefEntry = MetaGraphDef'CollectionDefEntry{_MetaGraphDef'CollectionDefEntry'key
                              +                                                                       :: !Data.Text.Text,
                              +                                                                       _MetaGraphDef'CollectionDefEntry'value
                              +                                                                       ::
                              +                                                                       !(Prelude.Maybe
                              +                                                                           CollectionDef)}
                              +                                     deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f MetaGraphDef'CollectionDefEntry
                              +           MetaGraphDef'CollectionDefEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'CollectionDefEntry'key
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'CollectionDefEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ CollectionDef, b ~ CollectionDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f MetaGraphDef'CollectionDefEntry
                              +           MetaGraphDef'CollectionDefEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'CollectionDefEntry'value
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'CollectionDefEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe CollectionDef,
                              +          b ~ Prelude.Maybe CollectionDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f MetaGraphDef'CollectionDefEntry
                              +           MetaGraphDef'CollectionDefEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'CollectionDefEntry'value
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'CollectionDefEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MetaGraphDef'CollectionDefEntry
                              +         where
                              +        def
                              +          = MetaGraphDef'CollectionDefEntry{_MetaGraphDef'CollectionDefEntry'key
                              +                                              = Data.ProtoLens.fieldDefault,
                              +                                            _MetaGraphDef'CollectionDefEntry'value =
                              +                                              Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message MetaGraphDef'CollectionDefEntry
                              +         where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'CollectionDefEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CollectionDef)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'CollectionDefEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MetaGraphDef.CollectionDefEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data MetaGraphDef'MetaInfoDef = MetaGraphDef'MetaInfoDef{_MetaGraphDef'MetaInfoDef'metaGraphVersion
                              +                                                         :: !Data.Text.Text,
                              +                                                         _MetaGraphDef'MetaInfoDef'strippedOpList ::
                              +                                                         !(Prelude.Maybe
                              +                                                             Proto.Tensorflow.Core.Framework.OpDef.OpList),
                              +                                                         _MetaGraphDef'MetaInfoDef'anyInfo ::
                              +                                                         !(Prelude.Maybe
                              +                                                             Proto.Google.Protobuf.Any.Any),
                              +                                                         _MetaGraphDef'MetaInfoDef'tags ::
                              +                                                         ![Data.Text.Text],
                              +                                                         _MetaGraphDef'MetaInfoDef'tensorflowVersion
                              +                                                         :: !Data.Text.Text,
                              +                                                         _MetaGraphDef'MetaInfoDef'tensorflowGitVersion
                              +                                                         :: !Data.Text.Text}
                              +                              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "metaGraphVersion" f MetaGraphDef'MetaInfoDef
                              +           MetaGraphDef'MetaInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MetaGraphDef'MetaInfoDef'metaGraphVersion
                              +                 (\ x__ y__ ->
                              +                    x__{_MetaGraphDef'MetaInfoDef'metaGraphVersion = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.OpDef.OpList,
                              +          b ~ Proto.Tensorflow.Core.Framework.OpDef.OpList,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "strippedOpList" f MetaGraphDef'MetaInfoDef
                              +           MetaGraphDef'MetaInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MetaGraphDef'MetaInfoDef'strippedOpList
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'MetaInfoDef'strippedOpList = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpList,
                              +          b ~ Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpList,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'strippedOpList" f
                              +           MetaGraphDef'MetaInfoDef
                              +           MetaGraphDef'MetaInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MetaGraphDef'MetaInfoDef'strippedOpList
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'MetaInfoDef'strippedOpList = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Google.Protobuf.Any.Any,
                              +          b ~ Proto.Google.Protobuf.Any.Any, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "anyInfo" f MetaGraphDef'MetaInfoDef
                              +           MetaGraphDef'MetaInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'MetaInfoDef'anyInfo
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'MetaInfoDef'anyInfo = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe Proto.Google.Protobuf.Any.Any,
                              +          b ~ Prelude.Maybe Proto.Google.Protobuf.Any.Any,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'anyInfo" f MetaGraphDef'MetaInfoDef
                              +           MetaGraphDef'MetaInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'MetaInfoDef'anyInfo
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'MetaInfoDef'anyInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tags" f MetaGraphDef'MetaInfoDef
                              +           MetaGraphDef'MetaInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'MetaInfoDef'tags
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'MetaInfoDef'tags = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensorflowVersion" f MetaGraphDef'MetaInfoDef
                              +           MetaGraphDef'MetaInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MetaGraphDef'MetaInfoDef'tensorflowVersion
                              +                 (\ x__ y__ ->
                              +                    x__{_MetaGraphDef'MetaInfoDef'tensorflowVersion = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensorflowGitVersion" f
                              +           MetaGraphDef'MetaInfoDef
                              +           MetaGraphDef'MetaInfoDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MetaGraphDef'MetaInfoDef'tensorflowGitVersion
                              +                 (\ x__ y__ ->
                              +                    x__{_MetaGraphDef'MetaInfoDef'tensorflowGitVersion = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MetaGraphDef'MetaInfoDef where
                              +        def
                              +          = MetaGraphDef'MetaInfoDef{_MetaGraphDef'MetaInfoDef'metaGraphVersion
                              +                                       = Data.ProtoLens.fieldDefault,
                              +                                     _MetaGraphDef'MetaInfoDef'strippedOpList = Prelude.Nothing,
                              +                                     _MetaGraphDef'MetaInfoDef'anyInfo = Prelude.Nothing,
                              +                                     _MetaGraphDef'MetaInfoDef'tags = [],
                              +                                     _MetaGraphDef'MetaInfoDef'tensorflowVersion =
                              +                                       Data.ProtoLens.fieldDefault,
                              +                                     _MetaGraphDef'MetaInfoDef'tensorflowGitVersion =
                              +                                       Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message MetaGraphDef'MetaInfoDef where
                              +        descriptor
                              +          = let metaGraphVersion__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "meta_graph_version"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         metaGraphVersion)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'MetaInfoDef
                              +                strippedOpList__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "stripped_op_list"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.OpDef.OpList)
                              +                      (Data.ProtoLens.OptionalField maybe'strippedOpList)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'MetaInfoDef
                              +                anyInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "any_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Proto.Google.Protobuf.Any.Any)
                              +                      (Data.ProtoLens.OptionalField maybe'anyInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'MetaInfoDef
                              +                tags__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tags"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked tags)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'MetaInfoDef
                              +                tensorflowVersion__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensorflow_version"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         tensorflowVersion)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'MetaInfoDef
                              +                tensorflowGitVersion__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensorflow_git_version"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         tensorflowGitVersion)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'MetaInfoDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MetaGraphDef.MetaInfoDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, metaGraphVersion__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, strippedOpList__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, anyInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, tags__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, tensorflowVersion__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, tensorflowGitVersion__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("meta_graph_version", metaGraphVersion__field_descriptor),
                              +                    ("stripped_op_list", strippedOpList__field_descriptor),
                              +                    ("any_info", anyInfo__field_descriptor),
                              +                    ("tags", tags__field_descriptor),
                              +                    ("tensorflow_version", tensorflowVersion__field_descriptor),
                              +                    ("tensorflow_git_version",
                              +                     tensorflowGitVersion__field_descriptor)])
                              +
                              +data MetaGraphDef'SignatureDefEntry = MetaGraphDef'SignatureDefEntry{_MetaGraphDef'SignatureDefEntry'key
                              +                                                                     :: !Data.Text.Text,
                              +                                                                     _MetaGraphDef'SignatureDefEntry'value
                              +                                                                     ::
                              +                                                                     !(Prelude.Maybe SignatureDef)}
                              +                                    deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f MetaGraphDef'SignatureDefEntry
                              +           MetaGraphDef'SignatureDefEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'SignatureDefEntry'key
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'SignatureDefEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ SignatureDef, b ~ SignatureDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f MetaGraphDef'SignatureDefEntry
                              +           MetaGraphDef'SignatureDefEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'SignatureDefEntry'value
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'SignatureDefEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe SignatureDef,
                              +          b ~ Prelude.Maybe SignatureDef, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f MetaGraphDef'SignatureDefEntry
                              +           MetaGraphDef'SignatureDefEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MetaGraphDef'SignatureDefEntry'value
                              +                 (\ x__ y__ -> x__{_MetaGraphDef'SignatureDefEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MetaGraphDef'SignatureDefEntry
                              +         where
                              +        def
                              +          = MetaGraphDef'SignatureDefEntry{_MetaGraphDef'SignatureDefEntry'key
                              +                                             = Data.ProtoLens.fieldDefault,
                              +                                           _MetaGraphDef'SignatureDefEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message MetaGraphDef'SignatureDefEntry
                              +         where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'SignatureDefEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SignatureDef)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor MetaGraphDef'SignatureDefEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MetaGraphDef.SignatureDefEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data SignatureDef = SignatureDef{_SignatureDef'inputs ::
                              +                                 !(Data.Map.Map Data.Text.Text TensorInfo),
                              +                                 _SignatureDef'outputs :: !(Data.Map.Map Data.Text.Text TensorInfo),
                              +                                 _SignatureDef'methodName :: !Data.Text.Text}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text TensorInfo,
                              +          b ~ Data.Map.Map Data.Text.Text TensorInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "inputs" f SignatureDef SignatureDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'inputs
                              +                 (\ x__ y__ -> x__{_SignatureDef'inputs = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text TensorInfo,
                              +          b ~ Data.Map.Map Data.Text.Text TensorInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "outputs" f SignatureDef SignatureDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'outputs
                              +                 (\ x__ y__ -> x__{_SignatureDef'outputs = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "methodName" f SignatureDef SignatureDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'methodName
                              +                 (\ x__ y__ -> x__{_SignatureDef'methodName = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SignatureDef where
                              +        def
                              +          = SignatureDef{_SignatureDef'inputs = Data.Map.empty,
                              +                         _SignatureDef'outputs = Data.Map.empty,
                              +                         _SignatureDef'methodName = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message SignatureDef where
                              +        descriptor
                              +          = let inputs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "inputs"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SignatureDef'InputsEntry)
                              +                      (Data.ProtoLens.MapField key value inputs)
                              +                      :: Data.ProtoLens.FieldDescriptor SignatureDef
                              +                outputs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "outputs"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SignatureDef'OutputsEntry)
                              +                      (Data.ProtoLens.MapField key value outputs)
                              +                      :: Data.ProtoLens.FieldDescriptor SignatureDef
                              +                methodName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "method_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional methodName)
                              +                      :: Data.ProtoLens.FieldDescriptor SignatureDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SignatureDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, inputs__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, outputs__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, methodName__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("inputs", inputs__field_descriptor),
                              +                    ("outputs", outputs__field_descriptor),
                              +                    ("method_name", methodName__field_descriptor)])
                              +
                              +data SignatureDef'InputsEntry = SignatureDef'InputsEntry{_SignatureDef'InputsEntry'key
                              +                                                         :: !Data.Text.Text,
                              +                                                         _SignatureDef'InputsEntry'value ::
                              +                                                         !(Prelude.Maybe TensorInfo)}
                              +                              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f SignatureDef'InputsEntry
                              +           SignatureDef'InputsEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'InputsEntry'key
                              +                 (\ x__ y__ -> x__{_SignatureDef'InputsEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ TensorInfo, b ~ TensorInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f SignatureDef'InputsEntry
                              +           SignatureDef'InputsEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'InputsEntry'value
                              +                 (\ x__ y__ -> x__{_SignatureDef'InputsEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe TensorInfo,
                              +          b ~ Prelude.Maybe TensorInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f SignatureDef'InputsEntry
                              +           SignatureDef'InputsEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'InputsEntry'value
                              +                 (\ x__ y__ -> x__{_SignatureDef'InputsEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SignatureDef'InputsEntry where
                              +        def
                              +          = SignatureDef'InputsEntry{_SignatureDef'InputsEntry'key =
                              +                                       Data.ProtoLens.fieldDefault,
                              +                                     _SignatureDef'InputsEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message SignatureDef'InputsEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor SignatureDef'InputsEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor TensorInfo)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor SignatureDef'InputsEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SignatureDef.InputsEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data SignatureDef'OutputsEntry = SignatureDef'OutputsEntry{_SignatureDef'OutputsEntry'key
                              +                                                           :: !Data.Text.Text,
                              +                                                           _SignatureDef'OutputsEntry'value ::
                              +                                                           !(Prelude.Maybe TensorInfo)}
                              +                               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f SignatureDef'OutputsEntry
                              +           SignatureDef'OutputsEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'OutputsEntry'key
                              +                 (\ x__ y__ -> x__{_SignatureDef'OutputsEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ TensorInfo, b ~ TensorInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f SignatureDef'OutputsEntry
                              +           SignatureDef'OutputsEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'OutputsEntry'value
                              +                 (\ x__ y__ -> x__{_SignatureDef'OutputsEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe TensorInfo,
                              +          b ~ Prelude.Maybe TensorInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f SignatureDef'OutputsEntry
                              +           SignatureDef'OutputsEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SignatureDef'OutputsEntry'value
                              +                 (\ x__ y__ -> x__{_SignatureDef'OutputsEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SignatureDef'OutputsEntry where
                              +        def
                              +          = SignatureDef'OutputsEntry{_SignatureDef'OutputsEntry'key =
                              +                                        Data.ProtoLens.fieldDefault,
                              +                                      _SignatureDef'OutputsEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message SignatureDef'OutputsEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor SignatureDef'OutputsEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor TensorInfo)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor SignatureDef'OutputsEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SignatureDef.OutputsEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data TensorInfo = TensorInfo{_TensorInfo'dtype ::
                              +                             !Proto.Tensorflow.Core.Framework.Types.DataType,
                              +                             _TensorInfo'tensorShape ::
                              +                             !(Prelude.Maybe
                              +                                 Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto),
                              +                             _TensorInfo'encoding :: !(Prelude.Maybe TensorInfo'Encoding)}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data TensorInfo'Encoding = TensorInfo'Name !Data.Text.Text
                              +                         | TensorInfo'CooSparse' !TensorInfo'CooSparse
                              +                         deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dtype" f TensorInfo TensorInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'dtype
                              +                 (\ x__ y__ -> x__{_TensorInfo'dtype = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensorShape" f TensorInfo TensorInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'tensorShape
                              +                 (\ x__ y__ -> x__{_TensorInfo'tensorShape = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensorShape" f TensorInfo TensorInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'tensorShape
                              +                 (\ x__ y__ -> x__{_TensorInfo'tensorShape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe TensorInfo'Encoding,
                              +          b ~ Prelude.Maybe TensorInfo'Encoding, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'encoding" f TensorInfo TensorInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'encoding
                              +                 (\ x__ y__ -> x__{_TensorInfo'encoding = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Data.Text.Text,
                              +          b ~ Prelude.Maybe Data.Text.Text, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'name" f TensorInfo TensorInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'encoding
                              +                 (\ x__ y__ -> x__{_TensorInfo'encoding = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (TensorInfo'Name x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap TensorInfo'Name y__))
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f TensorInfo TensorInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'encoding
                              +                 (\ x__ y__ -> x__{_TensorInfo'encoding = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (TensorInfo'Name x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap TensorInfo'Name y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe TensorInfo'CooSparse,
                              +          b ~ Prelude.Maybe TensorInfo'CooSparse, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'cooSparse" f TensorInfo TensorInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'encoding
                              +                 (\ x__ y__ -> x__{_TensorInfo'encoding = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (TensorInfo'CooSparse' x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap TensorInfo'CooSparse' y__))
                              +
                              +instance (a ~ TensorInfo'CooSparse, b ~ TensorInfo'CooSparse,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "cooSparse" f TensorInfo TensorInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'encoding
                              +                 (\ x__ y__ -> x__{_TensorInfo'encoding = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (TensorInfo'CooSparse' x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap TensorInfo'CooSparse' y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance Data.Default.Class.Default TensorInfo where
                              +        def
                              +          = TensorInfo{_TensorInfo'dtype = Data.Default.Class.def,
                              +                       _TensorInfo'tensorShape = Prelude.Nothing,
                              +                       _TensorInfo'encoding = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message TensorInfo where
                              +        descriptor
                              +          = let dtype__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dtype"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorInfo
                              +                tensorShape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor_shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.OptionalField maybe'tensorShape)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorInfo
                              +                name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.OptionalField maybe'name)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorInfo
                              +                cooSparse__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "coo_sparse"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor TensorInfo'CooSparse)
                              +                      (Data.ProtoLens.OptionalField maybe'cooSparse)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorInfo
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TensorInfo")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 2, dtype__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, tensorShape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, cooSparse__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("dtype", dtype__field_descriptor),
                              +                    ("tensor_shape", tensorShape__field_descriptor),
                              +                    ("name", name__field_descriptor),
                              +                    ("coo_sparse", cooSparse__field_descriptor)])
                              +
                              +data TensorInfo'CooSparse = TensorInfo'CooSparse{_TensorInfo'CooSparse'valuesTensorName
                              +                                                 :: !Data.Text.Text,
                              +                                                 _TensorInfo'CooSparse'indicesTensorName ::
                              +                                                 !Data.Text.Text,
                              +                                                 _TensorInfo'CooSparse'denseShapeTensorName ::
                              +                                                 !Data.Text.Text}
                              +                          deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "valuesTensorName" f TensorInfo'CooSparse
                              +           TensorInfo'CooSparse
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TensorInfo'CooSparse'valuesTensorName
                              +                 (\ x__ y__ -> x__{_TensorInfo'CooSparse'valuesTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "indicesTensorName" f TensorInfo'CooSparse
                              +           TensorInfo'CooSparse
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _TensorInfo'CooSparse'indicesTensorName
                              +                 (\ x__ y__ -> x__{_TensorInfo'CooSparse'indicesTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "denseShapeTensorName" f TensorInfo'CooSparse
                              +           TensorInfo'CooSparse
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _TensorInfo'CooSparse'denseShapeTensorName
                              +                 (\ x__ y__ ->
                              +                    x__{_TensorInfo'CooSparse'denseShapeTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default TensorInfo'CooSparse where
                              +        def
                              +          = TensorInfo'CooSparse{_TensorInfo'CooSparse'valuesTensorName =
                              +                                   Data.ProtoLens.fieldDefault,
                              +                                 _TensorInfo'CooSparse'indicesTensorName =
                              +                                   Data.ProtoLens.fieldDefault,
                              +                                 _TensorInfo'CooSparse'denseShapeTensorName =
                              +                                   Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message TensorInfo'CooSparse where
                              +        descriptor
                              +          = let valuesTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "values_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         valuesTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorInfo'CooSparse
                              +                indicesTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "indices_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         indicesTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorInfo'CooSparse
                              +                denseShapeTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dense_shape_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         denseShapeTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor TensorInfo'CooSparse
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TensorInfo.CooSparse")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, valuesTensorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, indicesTensorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, denseShapeTensorName__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("values_tensor_name", valuesTensorName__field_descriptor),
                              +                    ("indices_tensor_name", indicesTensorName__field_descriptor),
                              +                    ("dense_shape_tensor_name",
                              +                     denseShapeTensorName__field_descriptor)])
                              +
                              +anyInfo ::
                              +        forall f s t a b . (Lens.Labels.HasLens "anyInfo" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +anyInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "anyInfo")
                              +
                              +anyList ::
                              +        forall f s t a b . (Lens.Labels.HasLens "anyList" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +anyList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "anyList")
                              +
                              +assetFileDef ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "assetFileDef" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +assetFileDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "assetFileDef")
                              +
                              +bytesList ::
                              +          forall f s t a b . (Lens.Labels.HasLens "bytesList" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +bytesList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "bytesList")
                              +
                              +collectionDef ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "collectionDef" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +collectionDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "collectionDef")
                              +
                              +cooSparse ::
                              +          forall f s t a b . (Lens.Labels.HasLens "cooSparse" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +cooSparse
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "cooSparse")
                              +
                              +denseShapeTensorName ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "denseShapeTensorName" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +denseShapeTensorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "denseShapeTensorName")
                              +
                              +dtype ::
                              +      forall f s t a b . (Lens.Labels.HasLens "dtype" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +dtype
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "dtype")
                              +
                              +filename ::
                              +         forall f s t a b . (Lens.Labels.HasLens "filename" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +filename
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "filename")
                              +
                              +floatList ::
                              +          forall f s t a b . (Lens.Labels.HasLens "floatList" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +floatList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "floatList")
                              +
                              +graphDef ::
                              +         forall f s t a b . (Lens.Labels.HasLens "graphDef" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +graphDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "graphDef")
                              +
                              +indicesTensorName ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "indicesTensorName" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +indicesTensorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "indicesTensorName")
                              +
                              +inputs ::
                              +       forall f s t a b . (Lens.Labels.HasLens "inputs" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +inputs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "inputs")
                              +
                              +int64List ::
                              +          forall f s t a b . (Lens.Labels.HasLens "int64List" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +int64List
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "int64List")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +maybe'anyInfo ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'anyInfo" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'anyInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'anyInfo")
                              +
                              +maybe'anyList ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'anyList" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'anyList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'anyList")
                              +
                              +maybe'bytesList ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'bytesList" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'bytesList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'bytesList")
                              +
                              +maybe'cooSparse ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'cooSparse" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'cooSparse
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'cooSparse")
                              +
                              +maybe'encoding ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'encoding" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'encoding
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'encoding")
                              +
                              +maybe'floatList ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'floatList" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'floatList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'floatList")
                              +
                              +maybe'graphDef ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'graphDef" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'graphDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'graphDef")
                              +
                              +maybe'int64List ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "maybe'int64List" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +maybe'int64List
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'int64List")
                              +
                              +maybe'kind ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'kind" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'kind
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'kind")
                              +
                              +maybe'metaInfoDef ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'metaInfoDef" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'metaInfoDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'metaInfoDef")
                              +
                              +maybe'name ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'name" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'name")
                              +
                              +maybe'nodeList ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'nodeList" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'nodeList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'nodeList")
                              +
                              +maybe'saverDef ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'saverDef" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'saverDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'saverDef")
                              +
                              +maybe'strippedOpList ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "maybe'strippedOpList" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +maybe'strippedOpList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'strippedOpList")
                              +
                              +maybe'tensorInfo ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'tensorInfo" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'tensorInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'tensorInfo")
                              +
                              +maybe'tensorShape ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'tensorShape" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'tensorShape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'tensorShape")
                              +
                              +maybe'value ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'value" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'value")
                              +
                              +metaGraphVersion ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "metaGraphVersion" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +metaGraphVersion
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "metaGraphVersion")
                              +
                              +metaInfoDef ::
                              +            forall f s t a b . (Lens.Labels.HasLens "metaInfoDef" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +metaInfoDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "metaInfoDef")
                              +
                              +methodName ::
                              +           forall f s t a b . (Lens.Labels.HasLens "methodName" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +methodName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "methodName")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +nodeList ::
                              +         forall f s t a b . (Lens.Labels.HasLens "nodeList" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +nodeList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "nodeList")
                              +
                              +outputs ::
                              +        forall f s t a b . (Lens.Labels.HasLens "outputs" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +outputs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "outputs")
                              +
                              +saverDef ::
                              +         forall f s t a b . (Lens.Labels.HasLens "saverDef" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +saverDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "saverDef")
                              +
                              +signatureDef ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "signatureDef" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +signatureDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "signatureDef")
                              +
                              +strippedOpList ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "strippedOpList" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +strippedOpList
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "strippedOpList")
                              +
                              +tags ::
                              +     forall f s t a b . (Lens.Labels.HasLens "tags" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +tags
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tags")
                              +
                              +tensorInfo ::
                              +           forall f s t a b . (Lens.Labels.HasLens "tensorInfo" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +tensorInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensorInfo")
                              +
                              +tensorShape ::
                              +            forall f s t a b . (Lens.Labels.HasLens "tensorShape" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +tensorShape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensorShape")
                              +
                              +tensorflowGitVersion ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "tensorflowGitVersion" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +tensorflowGitVersion
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "tensorflowGitVersion")
                              +
                              +tensorflowVersion ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "tensorflowVersion" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +tensorflowVersion
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensorflowVersion")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              +
                              +valuesTensorName ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "valuesTensorName" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +valuesTensorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "valuesTensorName")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.NamedTensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.NamedTensor.html new file mode 100644 index 0000000..2a14ed2 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.NamedTensor.html @@ -0,0 +1,125 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/named_tensor.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.NamedTensor where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.Tensor
                              +
                              +data NamedTensorProto = NamedTensorProto{_NamedTensorProto'name ::
                              +                                         !Data.Text.Text,
                              +                                         _NamedTensorProto'tensor ::
                              +                                         !(Prelude.Maybe
                              +                                             Proto.Tensorflow.Core.Framework.Tensor.TensorProto)}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f NamedTensorProto NamedTensorProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NamedTensorProto'name
                              +                 (\ x__ y__ -> x__{_NamedTensorProto'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensor" f NamedTensorProto NamedTensorProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NamedTensorProto'tensor
                              +                 (\ x__ y__ -> x__{_NamedTensorProto'tensor = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'tensor" f NamedTensorProto
                              +           NamedTensorProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _NamedTensorProto'tensor
                              +                 (\ x__ y__ -> x__{_NamedTensorProto'tensor = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default NamedTensorProto where
                              +        def
                              +          = NamedTensorProto{_NamedTensorProto'name =
                              +                               Data.ProtoLens.fieldDefault,
                              +                             _NamedTensorProto'tensor = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message NamedTensorProto where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor NamedTensorProto
                              +                tensor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
                              +                      (Data.ProtoLens.OptionalField maybe'tensor)
                              +                      :: Data.ProtoLens.FieldDescriptor NamedTensorProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.NamedTensorProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, tensor__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("tensor", tensor__field_descriptor)])
                              +
                              +maybe'tensor ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "maybe'tensor" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +maybe'tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'tensor")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +tensor ::
                              +       forall f s t a b . (Lens.Labels.HasLens "tensor" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensor")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.QueueRunner.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.QueueRunner.html new file mode 100644 index 0000000..5a89efd --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.QueueRunner.html @@ -0,0 +1,198 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/queue_runner.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.QueueRunner where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Lib.Core.ErrorCodes
                              +
                              +data QueueRunnerDef = QueueRunnerDef{_QueueRunnerDef'queueName ::
                              +                                     !Data.Text.Text,
                              +                                     _QueueRunnerDef'enqueueOpName :: ![Data.Text.Text],
                              +                                     _QueueRunnerDef'closeOpName :: !Data.Text.Text,
                              +                                     _QueueRunnerDef'cancelOpName :: !Data.Text.Text,
                              +                                     _QueueRunnerDef'queueClosedExceptionTypes ::
                              +                                     ![Proto.Tensorflow.Core.Lib.Core.ErrorCodes.Code]}
                              +                    deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "queueName" f QueueRunnerDef QueueRunnerDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _QueueRunnerDef'queueName
                              +                 (\ x__ y__ -> x__{_QueueRunnerDef'queueName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "enqueueOpName" f QueueRunnerDef QueueRunnerDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _QueueRunnerDef'enqueueOpName
                              +                 (\ x__ y__ -> x__{_QueueRunnerDef'enqueueOpName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "closeOpName" f QueueRunnerDef QueueRunnerDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _QueueRunnerDef'closeOpName
                              +                 (\ x__ y__ -> x__{_QueueRunnerDef'closeOpName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "cancelOpName" f QueueRunnerDef QueueRunnerDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _QueueRunnerDef'cancelOpName
                              +                 (\ x__ y__ -> x__{_QueueRunnerDef'cancelOpName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Proto.Tensorflow.Core.Lib.Core.ErrorCodes.Code],
                              +          b ~ [Proto.Tensorflow.Core.Lib.Core.ErrorCodes.Code],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "queueClosedExceptionTypes" f QueueRunnerDef
                              +           QueueRunnerDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _QueueRunnerDef'queueClosedExceptionTypes
                              +                 (\ x__ y__ ->
                              +                    x__{_QueueRunnerDef'queueClosedExceptionTypes = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default QueueRunnerDef where
                              +        def
                              +          = QueueRunnerDef{_QueueRunnerDef'queueName =
                              +                             Data.ProtoLens.fieldDefault,
                              +                           _QueueRunnerDef'enqueueOpName = [],
                              +                           _QueueRunnerDef'closeOpName = Data.ProtoLens.fieldDefault,
                              +                           _QueueRunnerDef'cancelOpName = Data.ProtoLens.fieldDefault,
                              +                           _QueueRunnerDef'queueClosedExceptionTypes = []}
                              +
                              +instance Data.ProtoLens.Message QueueRunnerDef where
                              +        descriptor
                              +          = let queueName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "queue_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional queueName)
                              +                      :: Data.ProtoLens.FieldDescriptor QueueRunnerDef
                              +                enqueueOpName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "enqueue_op_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         enqueueOpName)
                              +                      :: Data.ProtoLens.FieldDescriptor QueueRunnerDef
                              +                closeOpName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "close_op_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional closeOpName)
                              +                      :: Data.ProtoLens.FieldDescriptor QueueRunnerDef
                              +                cancelOpName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cancel_op_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional cancelOpName)
                              +                      :: Data.ProtoLens.FieldDescriptor QueueRunnerDef
                              +                queueClosedExceptionTypes__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "queue_closed_exception_types"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Lib.Core.ErrorCodes.Code)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed
                              +                         queueClosedExceptionTypes)
                              +                      :: Data.ProtoLens.FieldDescriptor QueueRunnerDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.QueueRunnerDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, queueName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, enqueueOpName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, closeOpName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, cancelOpName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5,
                              +                     queueClosedExceptionTypes__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("queue_name", queueName__field_descriptor),
                              +                    ("enqueue_op_name", enqueueOpName__field_descriptor),
                              +                    ("close_op_name", closeOpName__field_descriptor),
                              +                    ("cancel_op_name", cancelOpName__field_descriptor),
                              +                    ("queue_closed_exception_types",
                              +                     queueClosedExceptionTypes__field_descriptor)])
                              +
                              +cancelOpName ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "cancelOpName" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +cancelOpName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "cancelOpName")
                              +
                              +closeOpName ::
                              +            forall f s t a b . (Lens.Labels.HasLens "closeOpName" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +closeOpName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "closeOpName")
                              +
                              +enqueueOpName ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "enqueueOpName" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +enqueueOpName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "enqueueOpName")
                              +
                              +queueClosedExceptionTypes ::
                              +                          forall f s t a b .
                              +                            (Lens.Labels.HasLens "queueClosedExceptionTypes" f s t a b) =>
                              +                            Lens.Family2.LensLike f s t a b
                              +queueClosedExceptionTypes
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "queueClosedExceptionTypes")
                              +
                              +queueName ::
                              +          forall f s t a b . (Lens.Labels.HasLens "queueName" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +queueName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "queueName")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.RewriterConfig.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.RewriterConfig.html new file mode 100644 index 0000000..6d4ad26 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.RewriterConfig.html @@ -0,0 +1,378 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/rewriter_config.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.RewriterConfig where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data AutoParallelOptions = AutoParallelOptions{_AutoParallelOptions'enable
                              +                                               :: !Prelude.Bool,
                              +                                               _AutoParallelOptions'numReplicas :: !Data.Int.Int32}
                              +                         deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "enable" f AutoParallelOptions
                              +           AutoParallelOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AutoParallelOptions'enable
                              +                 (\ x__ y__ -> x__{_AutoParallelOptions'enable = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "numReplicas" f AutoParallelOptions
                              +           AutoParallelOptions
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AutoParallelOptions'numReplicas
                              +                 (\ x__ y__ -> x__{_AutoParallelOptions'numReplicas = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default AutoParallelOptions where
                              +        def
                              +          = AutoParallelOptions{_AutoParallelOptions'enable =
                              +                                  Data.ProtoLens.fieldDefault,
                              +                                _AutoParallelOptions'numReplicas = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message AutoParallelOptions where
                              +        descriptor
                              +          = let enable__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "enable"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional enable)
                              +                      :: Data.ProtoLens.FieldDescriptor AutoParallelOptions
                              +                numReplicas__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "num_replicas"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numReplicas)
                              +                      :: Data.ProtoLens.FieldDescriptor AutoParallelOptions
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.AutoParallelOptions")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, enable__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, numReplicas__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("enable", enable__field_descriptor),
                              +                    ("num_replicas", numReplicas__field_descriptor)])
                              +
                              +data RewriterConfig = RewriterConfig{_RewriterConfig'optimizeTensorLayout
                              +                                     :: !Prelude.Bool,
                              +                                     _RewriterConfig'disableModelPruning :: !Prelude.Bool,
                              +                                     _RewriterConfig'constantFolding :: !Prelude.Bool,
                              +                                     _RewriterConfig'memoryOptimization ::
                              +                                     !RewriterConfig'MemOptType,
                              +                                     _RewriterConfig'autoParallel ::
                              +                                     !(Prelude.Maybe AutoParallelOptions),
                              +                                     _RewriterConfig'optimizers :: ![Data.Text.Text]}
                              +                    deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "optimizeTensorLayout" f RewriterConfig
                              +           RewriterConfig
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RewriterConfig'optimizeTensorLayout
                              +                 (\ x__ y__ -> x__{_RewriterConfig'optimizeTensorLayout = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "disableModelPruning" f RewriterConfig
                              +           RewriterConfig
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RewriterConfig'disableModelPruning
                              +                 (\ x__ y__ -> x__{_RewriterConfig'disableModelPruning = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "constantFolding" f RewriterConfig
                              +           RewriterConfig
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RewriterConfig'constantFolding
                              +                 (\ x__ y__ -> x__{_RewriterConfig'constantFolding = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ RewriterConfig'MemOptType,
                              +          b ~ RewriterConfig'MemOptType, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "memoryOptimization" f RewriterConfig
                              +           RewriterConfig
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RewriterConfig'memoryOptimization
                              +                 (\ x__ y__ -> x__{_RewriterConfig'memoryOptimization = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ AutoParallelOptions, b ~ AutoParallelOptions,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "autoParallel" f RewriterConfig RewriterConfig
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RewriterConfig'autoParallel
                              +                 (\ x__ y__ -> x__{_RewriterConfig'autoParallel = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe AutoParallelOptions,
                              +          b ~ Prelude.Maybe AutoParallelOptions, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'autoParallel" f RewriterConfig
                              +           RewriterConfig
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RewriterConfig'autoParallel
                              +                 (\ x__ y__ -> x__{_RewriterConfig'autoParallel = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "optimizers" f RewriterConfig RewriterConfig a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RewriterConfig'optimizers
                              +                 (\ x__ y__ -> x__{_RewriterConfig'optimizers = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default RewriterConfig where
                              +        def
                              +          = RewriterConfig{_RewriterConfig'optimizeTensorLayout =
                              +                             Data.ProtoLens.fieldDefault,
                              +                           _RewriterConfig'disableModelPruning = Data.ProtoLens.fieldDefault,
                              +                           _RewriterConfig'constantFolding = Data.ProtoLens.fieldDefault,
                              +                           _RewriterConfig'memoryOptimization = Data.Default.Class.def,
                              +                           _RewriterConfig'autoParallel = Prelude.Nothing,
                              +                           _RewriterConfig'optimizers = []}
                              +
                              +instance Data.ProtoLens.Message RewriterConfig where
                              +        descriptor
                              +          = let optimizeTensorLayout__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "optimize_tensor_layout"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         optimizeTensorLayout)
                              +                      :: Data.ProtoLens.FieldDescriptor RewriterConfig
                              +                disableModelPruning__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "disable_model_pruning"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         disableModelPruning)
                              +                      :: Data.ProtoLens.FieldDescriptor RewriterConfig
                              +                constantFolding__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "constant_folding"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional constantFolding)
                              +                      :: Data.ProtoLens.FieldDescriptor RewriterConfig
                              +                memoryOptimization__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "memory_optimization"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor RewriterConfig'MemOptType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         memoryOptimization)
                              +                      :: Data.ProtoLens.FieldDescriptor RewriterConfig
                              +                autoParallel__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "auto_parallel"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor AutoParallelOptions)
                              +                      (Data.ProtoLens.OptionalField maybe'autoParallel)
                              +                      :: Data.ProtoLens.FieldDescriptor RewriterConfig
                              +                optimizers__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "optimizers"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked optimizers)
                              +                      :: Data.ProtoLens.FieldDescriptor RewriterConfig
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.RewriterConfig")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, optimizeTensorLayout__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, disableModelPruning__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, constantFolding__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, memoryOptimization__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, autoParallel__field_descriptor),
                              +                    (Data.ProtoLens.Tag 100, optimizers__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("optimize_tensor_layout",
                              +                     optimizeTensorLayout__field_descriptor),
                              +                    ("disable_model_pruning", disableModelPruning__field_descriptor),
                              +                    ("constant_folding", constantFolding__field_descriptor),
                              +                    ("memory_optimization", memoryOptimization__field_descriptor),
                              +                    ("auto_parallel", autoParallel__field_descriptor),
                              +                    ("optimizers", optimizers__field_descriptor)])
                              +
                              +data RewriterConfig'MemOptType = RewriterConfig'NO_MEM_OPT
                              +                               | RewriterConfig'MANUAL
                              +                               | RewriterConfig'HEURISTICS
                              +                               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default RewriterConfig'MemOptType where
                              +        def = RewriterConfig'NO_MEM_OPT
                              +
                              +instance Data.ProtoLens.FieldDefault RewriterConfig'MemOptType
                              +         where
                              +        fieldDefault = RewriterConfig'NO_MEM_OPT
                              +
                              +instance Data.ProtoLens.MessageEnum RewriterConfig'MemOptType where
                              +        maybeToEnum 0 = Prelude.Just RewriterConfig'NO_MEM_OPT
                              +        maybeToEnum 1 = Prelude.Just RewriterConfig'MANUAL
                              +        maybeToEnum 2 = Prelude.Just RewriterConfig'HEURISTICS
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum RewriterConfig'NO_MEM_OPT = "NO_MEM_OPT"
                              +        showEnum RewriterConfig'MANUAL = "MANUAL"
                              +        showEnum RewriterConfig'HEURISTICS = "HEURISTICS"
                              +        readEnum "NO_MEM_OPT" = Prelude.Just RewriterConfig'NO_MEM_OPT
                              +        readEnum "MANUAL" = Prelude.Just RewriterConfig'MANUAL
                              +        readEnum "HEURISTICS" = Prelude.Just RewriterConfig'HEURISTICS
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum RewriterConfig'MemOptType where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum MemOptType: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum RewriterConfig'NO_MEM_OPT = 0
                              +        fromEnum RewriterConfig'MANUAL = 1
                              +        fromEnum RewriterConfig'HEURISTICS = 2
                              +        succ RewriterConfig'HEURISTICS
                              +          = Prelude.error
                              +              "RewriterConfig'MemOptType.succ: bad argument RewriterConfig'HEURISTICS. This value would be out of bounds."
                              +        succ RewriterConfig'NO_MEM_OPT = RewriterConfig'MANUAL
                              +        succ RewriterConfig'MANUAL = RewriterConfig'HEURISTICS
                              +        pred RewriterConfig'NO_MEM_OPT
                              +          = Prelude.error
                              +              "RewriterConfig'MemOptType.pred: bad argument RewriterConfig'NO_MEM_OPT. This value would be out of bounds."
                              +        pred RewriterConfig'MANUAL = RewriterConfig'NO_MEM_OPT
                              +        pred RewriterConfig'HEURISTICS = RewriterConfig'MANUAL
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded RewriterConfig'MemOptType where
                              +        minBound = RewriterConfig'NO_MEM_OPT
                              +        maxBound = RewriterConfig'HEURISTICS
                              +
                              +autoParallel ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "autoParallel" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +autoParallel
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "autoParallel")
                              +
                              +constantFolding ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "constantFolding" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +constantFolding
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "constantFolding")
                              +
                              +disableModelPruning ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "disableModelPruning" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +disableModelPruning
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "disableModelPruning")
                              +
                              +enable ::
                              +       forall f s t a b . (Lens.Labels.HasLens "enable" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +enable
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "enable")
                              +
                              +maybe'autoParallel ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "maybe'autoParallel" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +maybe'autoParallel
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'autoParallel")
                              +
                              +memoryOptimization ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "memoryOptimization" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +memoryOptimization
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "memoryOptimization")
                              +
                              +numReplicas ::
                              +            forall f s t a b . (Lens.Labels.HasLens "numReplicas" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +numReplicas
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "numReplicas")
                              +
                              +optimizeTensorLayout ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "optimizeTensorLayout" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +optimizeTensorLayout
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "optimizeTensorLayout")
                              +
                              +optimizers ::
                              +           forall f s t a b . (Lens.Labels.HasLens "optimizers" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +optimizers
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "optimizers")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.SavedModel.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.SavedModel.html new file mode 100644 index 0000000..4eb76ea --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.SavedModel.html @@ -0,0 +1,107 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/saved_model.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.SavedModel where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Protobuf.MetaGraph
                              +
                              +data SavedModel = SavedModel{_SavedModel'savedModelSchemaVersion ::
                              +                             !Data.Int.Int64,
                              +                             _SavedModel'metaGraphs ::
                              +                             ![Proto.Tensorflow.Core.Protobuf.MetaGraph.MetaGraphDef]}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "savedModelSchemaVersion" f SavedModel
                              +           SavedModel
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedModel'savedModelSchemaVersion
                              +                 (\ x__ y__ -> x__{_SavedModel'savedModelSchemaVersion = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            [Proto.Tensorflow.Core.Protobuf.MetaGraph.MetaGraphDef],
                              +          b ~ [Proto.Tensorflow.Core.Protobuf.MetaGraph.MetaGraphDef],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "metaGraphs" f SavedModel SavedModel a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedModel'metaGraphs
                              +                 (\ x__ y__ -> x__{_SavedModel'metaGraphs = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SavedModel where
                              +        def
                              +          = SavedModel{_SavedModel'savedModelSchemaVersion =
                              +                         Data.ProtoLens.fieldDefault,
                              +                       _SavedModel'metaGraphs = []}
                              +
                              +instance Data.ProtoLens.Message SavedModel where
                              +        descriptor
                              +          = let savedModelSchemaVersion__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "saved_model_schema_version"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         savedModelSchemaVersion)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedModel
                              +                metaGraphs__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "meta_graphs"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Protobuf.MetaGraph.MetaGraphDef)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked metaGraphs)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedModel
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SavedModel")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, savedModelSchemaVersion__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, metaGraphs__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("saved_model_schema_version",
                              +                     savedModelSchemaVersion__field_descriptor),
                              +                    ("meta_graphs", metaGraphs__field_descriptor)])
                              +
                              +metaGraphs ::
                              +           forall f s t a b . (Lens.Labels.HasLens "metaGraphs" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +metaGraphs
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "metaGraphs")
                              +
                              +savedModelSchemaVersion ::
                              +                        forall f s t a b .
                              +                          (Lens.Labels.HasLens "savedModelSchemaVersion" f s t a b) =>
                              +                          Lens.Family2.LensLike f s t a b
                              +savedModelSchemaVersion
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "savedModelSchemaVersion")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Saver.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Saver.html new file mode 100644 index 0000000..180c3ad --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.Saver.html @@ -0,0 +1,302 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/saver.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.Saver where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data SaverDef = SaverDef{_SaverDef'filenameTensorName ::
                              +                         !Data.Text.Text,
                              +                         _SaverDef'saveTensorName :: !Data.Text.Text,
                              +                         _SaverDef'restoreOpName :: !Data.Text.Text,
                              +                         _SaverDef'maxToKeep :: !Data.Int.Int32,
                              +                         _SaverDef'sharded :: !Prelude.Bool,
                              +                         _SaverDef'keepCheckpointEveryNHours :: !Prelude.Float,
                              +                         _SaverDef'version :: !SaverDef'CheckpointFormatVersion}
                              +              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "filenameTensorName" f SaverDef SaverDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaverDef'filenameTensorName
                              +                 (\ x__ y__ -> x__{_SaverDef'filenameTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "saveTensorName" f SaverDef SaverDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaverDef'saveTensorName
                              +                 (\ x__ y__ -> x__{_SaverDef'saveTensorName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "restoreOpName" f SaverDef SaverDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaverDef'restoreOpName
                              +                 (\ x__ y__ -> x__{_SaverDef'restoreOpName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maxToKeep" f SaverDef SaverDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaverDef'maxToKeep
                              +                 (\ x__ y__ -> x__{_SaverDef'maxToKeep = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Bool, b ~ Prelude.Bool, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "sharded" f SaverDef SaverDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaverDef'sharded
                              +                 (\ x__ y__ -> x__{_SaverDef'sharded = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Float, b ~ Prelude.Float,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "keepCheckpointEveryNHours" f SaverDef SaverDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaverDef'keepCheckpointEveryNHours
                              +                 (\ x__ y__ -> x__{_SaverDef'keepCheckpointEveryNHours = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ SaverDef'CheckpointFormatVersion,
                              +          b ~ SaverDef'CheckpointFormatVersion, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "version" f SaverDef SaverDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SaverDef'version
                              +                 (\ x__ y__ -> x__{_SaverDef'version = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SaverDef where
                              +        def
                              +          = SaverDef{_SaverDef'filenameTensorName =
                              +                       Data.ProtoLens.fieldDefault,
                              +                     _SaverDef'saveTensorName = Data.ProtoLens.fieldDefault,
                              +                     _SaverDef'restoreOpName = Data.ProtoLens.fieldDefault,
                              +                     _SaverDef'maxToKeep = Data.ProtoLens.fieldDefault,
                              +                     _SaverDef'sharded = Data.ProtoLens.fieldDefault,
                              +                     _SaverDef'keepCheckpointEveryNHours = Data.ProtoLens.fieldDefault,
                              +                     _SaverDef'version = Data.Default.Class.def}
                              +
                              +instance Data.ProtoLens.Message SaverDef where
                              +        descriptor
                              +          = let filenameTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "filename_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         filenameTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor SaverDef
                              +                saveTensorName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "save_tensor_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional saveTensorName)
                              +                      :: Data.ProtoLens.FieldDescriptor SaverDef
                              +                restoreOpName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "restore_op_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional restoreOpName)
                              +                      :: Data.ProtoLens.FieldDescriptor SaverDef
                              +                maxToKeep__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "max_to_keep"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional maxToKeep)
                              +                      :: Data.ProtoLens.FieldDescriptor SaverDef
                              +                sharded__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "sharded"
                              +                      (Data.ProtoLens.BoolField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional sharded)
                              +                      :: Data.ProtoLens.FieldDescriptor SaverDef
                              +                keepCheckpointEveryNHours__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "keep_checkpoint_every_n_hours"
                              +                      (Data.ProtoLens.FloatField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         keepCheckpointEveryNHours)
                              +                      :: Data.ProtoLens.FieldDescriptor SaverDef
                              +                version__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "version"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           SaverDef'CheckpointFormatVersion)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional version)
                              +                      :: Data.ProtoLens.FieldDescriptor SaverDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SaverDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, filenameTensorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, saveTensorName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, restoreOpName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, maxToKeep__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, sharded__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6,
                              +                     keepCheckpointEveryNHours__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, version__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("filename_tensor_name", filenameTensorName__field_descriptor),
                              +                    ("save_tensor_name", saveTensorName__field_descriptor),
                              +                    ("restore_op_name", restoreOpName__field_descriptor),
                              +                    ("max_to_keep", maxToKeep__field_descriptor),
                              +                    ("sharded", sharded__field_descriptor),
                              +                    ("keep_checkpoint_every_n_hours",
                              +                     keepCheckpointEveryNHours__field_descriptor),
                              +                    ("version", version__field_descriptor)])
                              +
                              +data SaverDef'CheckpointFormatVersion = SaverDef'LEGACY
                              +                                      | SaverDef'V1
                              +                                      | SaverDef'V2
                              +                                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default
                              +           SaverDef'CheckpointFormatVersion
                              +         where
                              +        def = SaverDef'LEGACY
                              +
                              +instance Data.ProtoLens.FieldDefault
                              +           SaverDef'CheckpointFormatVersion
                              +         where
                              +        fieldDefault = SaverDef'LEGACY
                              +
                              +instance Data.ProtoLens.MessageEnum
                              +           SaverDef'CheckpointFormatVersion
                              +         where
                              +        maybeToEnum 0 = Prelude.Just SaverDef'LEGACY
                              +        maybeToEnum 1 = Prelude.Just SaverDef'V1
                              +        maybeToEnum 2 = Prelude.Just SaverDef'V2
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum SaverDef'LEGACY = "LEGACY"
                              +        showEnum SaverDef'V1 = "V1"
                              +        showEnum SaverDef'V2 = "V2"
                              +        readEnum "LEGACY" = Prelude.Just SaverDef'LEGACY
                              +        readEnum "V1" = Prelude.Just SaverDef'V1
                              +        readEnum "V2" = Prelude.Just SaverDef'V2
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum SaverDef'CheckpointFormatVersion where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++)
                              +                    "toEnum: unknown value for enum CheckpointFormatVersion: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum SaverDef'LEGACY = 0
                              +        fromEnum SaverDef'V1 = 1
                              +        fromEnum SaverDef'V2 = 2
                              +        succ SaverDef'V2
                              +          = Prelude.error
                              +              "SaverDef'CheckpointFormatVersion.succ: bad argument SaverDef'V2. This value would be out of bounds."
                              +        succ SaverDef'LEGACY = SaverDef'V1
                              +        succ SaverDef'V1 = SaverDef'V2
                              +        pred SaverDef'LEGACY
                              +          = Prelude.error
                              +              "SaverDef'CheckpointFormatVersion.pred: bad argument SaverDef'LEGACY. This value would be out of bounds."
                              +        pred SaverDef'V1 = SaverDef'LEGACY
                              +        pred SaverDef'V2 = SaverDef'V1
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded SaverDef'CheckpointFormatVersion where
                              +        minBound = SaverDef'LEGACY
                              +        maxBound = SaverDef'V2
                              +
                              +filenameTensorName ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "filenameTensorName" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +filenameTensorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "filenameTensorName")
                              +
                              +keepCheckpointEveryNHours ::
                              +                          forall f s t a b .
                              +                            (Lens.Labels.HasLens "keepCheckpointEveryNHours" f s t a b) =>
                              +                            Lens.Family2.LensLike f s t a b
                              +keepCheckpointEveryNHours
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "keepCheckpointEveryNHours")
                              +
                              +maxToKeep ::
                              +          forall f s t a b . (Lens.Labels.HasLens "maxToKeep" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +maxToKeep
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maxToKeep")
                              +
                              +restoreOpName ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "restoreOpName" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +restoreOpName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "restoreOpName")
                              +
                              +saveTensorName ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "saveTensorName" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +saveTensorName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "saveTensorName")
                              +
                              +sharded ::
                              +        forall f s t a b . (Lens.Labels.HasLens "sharded" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +sharded
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "sharded")
                              +
                              +version ::
                              +        forall f s t a b . (Lens.Labels.HasLens "version" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +version
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "version")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.TensorBundle.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.TensorBundle.html new file mode 100644 index 0000000..f5f6b48 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.TensorBundle.html @@ -0,0 +1,457 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/tensor_bundle.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.TensorBundle where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +import qualified Proto.Tensorflow.Core.Framework.TensorSlice
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +import qualified Proto.Tensorflow.Core.Framework.Versions
                              +
                              +data BundleEntryProto = BundleEntryProto{_BundleEntryProto'dtype ::
                              +                                         !Proto.Tensorflow.Core.Framework.Types.DataType,
                              +                                         _BundleEntryProto'shape ::
                              +                                         !(Prelude.Maybe
                              +                                             Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto),
                              +                                         _BundleEntryProto'shardId :: !Data.Int.Int32,
                              +                                         _BundleEntryProto'offset :: !Data.Int.Int64,
                              +                                         _BundleEntryProto'size :: !Data.Int.Int64,
                              +                                         _BundleEntryProto'crc32c :: !Data.Word.Word32,
                              +                                         _BundleEntryProto'slices ::
                              +                                         ![Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto]}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "dtype" f BundleEntryProto BundleEntryProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleEntryProto'dtype
                              +                 (\ x__ y__ -> x__{_BundleEntryProto'dtype = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shape" f BundleEntryProto BundleEntryProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleEntryProto'shape
                              +                 (\ x__ y__ -> x__{_BundleEntryProto'shape = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'shape" f BundleEntryProto
                              +           BundleEntryProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleEntryProto'shape
                              +                 (\ x__ y__ -> x__{_BundleEntryProto'shape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shardId" f BundleEntryProto BundleEntryProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleEntryProto'shardId
                              +                 (\ x__ y__ -> x__{_BundleEntryProto'shardId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "offset" f BundleEntryProto BundleEntryProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleEntryProto'offset
                              +                 (\ x__ y__ -> x__{_BundleEntryProto'offset = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "size" f BundleEntryProto BundleEntryProto a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleEntryProto'size
                              +                 (\ x__ y__ -> x__{_BundleEntryProto'size = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Word.Word32, b ~ Data.Word.Word32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "crc32c" f BundleEntryProto BundleEntryProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleEntryProto'crc32c
                              +                 (\ x__ y__ -> x__{_BundleEntryProto'crc32c = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            [Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto],
                              +          b ~ [Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "slices" f BundleEntryProto BundleEntryProto a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleEntryProto'slices
                              +                 (\ x__ y__ -> x__{_BundleEntryProto'slices = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default BundleEntryProto where
                              +        def
                              +          = BundleEntryProto{_BundleEntryProto'dtype =
                              +                               Data.Default.Class.def,
                              +                             _BundleEntryProto'shape = Prelude.Nothing,
                              +                             _BundleEntryProto'shardId = Data.ProtoLens.fieldDefault,
                              +                             _BundleEntryProto'offset = Data.ProtoLens.fieldDefault,
                              +                             _BundleEntryProto'size = Data.ProtoLens.fieldDefault,
                              +                             _BundleEntryProto'crc32c = Data.ProtoLens.fieldDefault,
                              +                             _BundleEntryProto'slices = []}
                              +
                              +instance Data.ProtoLens.Message BundleEntryProto where
                              +        descriptor
                              +          = let dtype__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "dtype"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleEntryProto
                              +                shape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.OptionalField maybe'shape)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleEntryProto
                              +                shardId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shard_id"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional shardId)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleEntryProto
                              +                offset__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "offset"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional offset)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleEntryProto
                              +                size__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "size"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional size)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleEntryProto
                              +                crc32c__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "crc32c"
                              +                      (Data.ProtoLens.Fixed32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional crc32c)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleEntryProto
                              +                slices__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "slices"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked slices)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleEntryProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.BundleEntryProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, shape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, shardId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, offset__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, size__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, crc32c__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, slices__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("dtype", dtype__field_descriptor),
                              +                    ("shape", shape__field_descriptor),
                              +                    ("shard_id", shardId__field_descriptor),
                              +                    ("offset", offset__field_descriptor),
                              +                    ("size", size__field_descriptor),
                              +                    ("crc32c", crc32c__field_descriptor),
                              +                    ("slices", slices__field_descriptor)])
                              +
                              +data BundleHeaderProto = BundleHeaderProto{_BundleHeaderProto'numShards
                              +                                           :: !Data.Int.Int32,
                              +                                           _BundleHeaderProto'endianness ::
                              +                                           !BundleHeaderProto'Endianness,
                              +                                           _BundleHeaderProto'version ::
                              +                                           !(Prelude.Maybe
                              +                                               Proto.Tensorflow.Core.Framework.Versions.VersionDef)}
                              +                       deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "numShards" f BundleHeaderProto
                              +           BundleHeaderProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleHeaderProto'numShards
                              +                 (\ x__ y__ -> x__{_BundleHeaderProto'numShards = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ BundleHeaderProto'Endianness,
                              +          b ~ BundleHeaderProto'Endianness, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "endianness" f BundleHeaderProto
                              +           BundleHeaderProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleHeaderProto'endianness
                              +                 (\ x__ y__ -> x__{_BundleHeaderProto'endianness = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          b ~ Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "version" f BundleHeaderProto BundleHeaderProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleHeaderProto'version
                              +                 (\ x__ y__ -> x__{_BundleHeaderProto'version = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'version" f BundleHeaderProto
                              +           BundleHeaderProto
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BundleHeaderProto'version
                              +                 (\ x__ y__ -> x__{_BundleHeaderProto'version = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default BundleHeaderProto where
                              +        def
                              +          = BundleHeaderProto{_BundleHeaderProto'numShards =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _BundleHeaderProto'endianness = Data.Default.Class.def,
                              +                              _BundleHeaderProto'version = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message BundleHeaderProto where
                              +        descriptor
                              +          = let numShards__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "num_shards"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numShards)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleHeaderProto
                              +                endianness__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "endianness"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor BundleHeaderProto'Endianness)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional endianness)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleHeaderProto
                              +                version__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "version"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Versions.VersionDef)
                              +                      (Data.ProtoLens.OptionalField maybe'version)
                              +                      :: Data.ProtoLens.FieldDescriptor BundleHeaderProto
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.BundleHeaderProto")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, numShards__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, endianness__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, version__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("num_shards", numShards__field_descriptor),
                              +                    ("endianness", endianness__field_descriptor),
                              +                    ("version", version__field_descriptor)])
                              +
                              +data BundleHeaderProto'Endianness = BundleHeaderProto'LITTLE
                              +                                  | BundleHeaderProto'BIG
                              +                                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default BundleHeaderProto'Endianness
                              +         where
                              +        def = BundleHeaderProto'LITTLE
                              +
                              +instance Data.ProtoLens.FieldDefault BundleHeaderProto'Endianness
                              +         where
                              +        fieldDefault = BundleHeaderProto'LITTLE
                              +
                              +instance Data.ProtoLens.MessageEnum BundleHeaderProto'Endianness
                              +         where
                              +        maybeToEnum 0 = Prelude.Just BundleHeaderProto'LITTLE
                              +        maybeToEnum 1 = Prelude.Just BundleHeaderProto'BIG
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum BundleHeaderProto'LITTLE = "LITTLE"
                              +        showEnum BundleHeaderProto'BIG = "BIG"
                              +        readEnum "LITTLE" = Prelude.Just BundleHeaderProto'LITTLE
                              +        readEnum "BIG" = Prelude.Just BundleHeaderProto'BIG
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum BundleHeaderProto'Endianness where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum Endianness: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum BundleHeaderProto'LITTLE = 0
                              +        fromEnum BundleHeaderProto'BIG = 1
                              +        succ BundleHeaderProto'BIG
                              +          = Prelude.error
                              +              "BundleHeaderProto'Endianness.succ: bad argument BundleHeaderProto'BIG. This value would be out of bounds."
                              +        succ BundleHeaderProto'LITTLE = BundleHeaderProto'BIG
                              +        pred BundleHeaderProto'LITTLE
                              +          = Prelude.error
                              +              "BundleHeaderProto'Endianness.pred: bad argument BundleHeaderProto'LITTLE. This value would be out of bounds."
                              +        pred BundleHeaderProto'BIG = BundleHeaderProto'LITTLE
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded BundleHeaderProto'Endianness where
                              +        minBound = BundleHeaderProto'LITTLE
                              +        maxBound = BundleHeaderProto'BIG
                              +
                              +crc32c ::
                              +       forall f s t a b . (Lens.Labels.HasLens "crc32c" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +crc32c
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "crc32c")
                              +
                              +dtype ::
                              +      forall f s t a b . (Lens.Labels.HasLens "dtype" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +dtype
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "dtype")
                              +
                              +endianness ::
                              +           forall f s t a b . (Lens.Labels.HasLens "endianness" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +endianness
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "endianness")
                              +
                              +maybe'shape ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'shape" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'shape")
                              +
                              +maybe'version ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'version" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'version
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'version")
                              +
                              +numShards ::
                              +          forall f s t a b . (Lens.Labels.HasLens "numShards" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +numShards
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "numShards")
                              +
                              +offset ::
                              +       forall f s t a b . (Lens.Labels.HasLens "offset" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +offset
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "offset")
                              +
                              +shape ::
                              +      forall f s t a b . (Lens.Labels.HasLens "shape" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "shape")
                              +
                              +shardId ::
                              +        forall f s t a b . (Lens.Labels.HasLens "shardId" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +shardId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "shardId")
                              +
                              +size ::
                              +     forall f s t a b . (Lens.Labels.HasLens "size" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +size
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "size")
                              +
                              +slices ::
                              +       forall f s t a b . (Lens.Labels.HasLens "slices" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +slices
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "slices")
                              +
                              +version ::
                              +        forall f s t a b . (Lens.Labels.HasLens "version" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +version
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "version")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.TensorflowServer.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.TensorflowServer.html new file mode 100644 index 0000000..032d49f --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Protobuf.TensorflowServer.html @@ -0,0 +1,231 @@ +
                              {- This file was auto-generated from tensorflow/core/protobuf/tensorflow_server.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Protobuf.TensorflowServer where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Protobuf.Cluster
                              +import qualified Proto.Tensorflow.Core.Protobuf.Config
                              +
                              +data ServerDef = ServerDef{_ServerDef'cluster ::
                              +                           !(Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef),
                              +                           _ServerDef'jobName :: !Data.Text.Text,
                              +                           _ServerDef'taskIndex :: !Data.Int.Int32,
                              +                           _ServerDef'defaultSessionConfig ::
                              +                           !(Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Config.ConfigProto),
                              +                           _ServerDef'protocol :: !Data.Text.Text}
                              +               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef,
                              +          b ~ Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "cluster" f ServerDef ServerDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ServerDef'cluster
                              +                 (\ x__ y__ -> x__{_ServerDef'cluster = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'cluster" f ServerDef ServerDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ServerDef'cluster
                              +                 (\ x__ y__ -> x__{_ServerDef'cluster = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "jobName" f ServerDef ServerDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ServerDef'jobName
                              +                 (\ x__ y__ -> x__{_ServerDef'jobName = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int32, b ~ Data.Int.Int32,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "taskIndex" f ServerDef ServerDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ServerDef'taskIndex
                              +                 (\ x__ y__ -> x__{_ServerDef'taskIndex = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Protobuf.Config.ConfigProto,
                              +          b ~ Proto.Tensorflow.Core.Protobuf.Config.ConfigProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "defaultSessionConfig" f ServerDef ServerDef a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ServerDef'defaultSessionConfig
                              +                 (\ x__ y__ -> x__{_ServerDef'defaultSessionConfig = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Config.ConfigProto,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Protobuf.Config.ConfigProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'defaultSessionConfig" f ServerDef
                              +           ServerDef
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ServerDef'defaultSessionConfig
                              +                 (\ x__ y__ -> x__{_ServerDef'defaultSessionConfig = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "protocol" f ServerDef ServerDef a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _ServerDef'protocol
                              +                 (\ x__ y__ -> x__{_ServerDef'protocol = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default ServerDef where
                              +        def
                              +          = ServerDef{_ServerDef'cluster = Prelude.Nothing,
                              +                      _ServerDef'jobName = Data.ProtoLens.fieldDefault,
                              +                      _ServerDef'taskIndex = Data.ProtoLens.fieldDefault,
                              +                      _ServerDef'defaultSessionConfig = Prelude.Nothing,
                              +                      _ServerDef'protocol = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message ServerDef where
                              +        descriptor
                              +          = let cluster__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cluster"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Protobuf.Cluster.ClusterDef)
                              +                      (Data.ProtoLens.OptionalField maybe'cluster)
                              +                      :: Data.ProtoLens.FieldDescriptor ServerDef
                              +                jobName__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "job_name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional jobName)
                              +                      :: Data.ProtoLens.FieldDescriptor ServerDef
                              +                taskIndex__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "task_index"
                              +                      (Data.ProtoLens.Int32Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional taskIndex)
                              +                      :: Data.ProtoLens.FieldDescriptor ServerDef
                              +                defaultSessionConfig__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "default_session_config"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Protobuf.Config.ConfigProto)
                              +                      (Data.ProtoLens.OptionalField maybe'defaultSessionConfig)
                              +                      :: Data.ProtoLens.FieldDescriptor ServerDef
                              +                protocol__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "protocol"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional protocol)
                              +                      :: Data.ProtoLens.FieldDescriptor ServerDef
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.ServerDef")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, cluster__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, jobName__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, taskIndex__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, defaultSessionConfig__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, protocol__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("cluster", cluster__field_descriptor),
                              +                    ("job_name", jobName__field_descriptor),
                              +                    ("task_index", taskIndex__field_descriptor),
                              +                    ("default_session_config", defaultSessionConfig__field_descriptor),
                              +                    ("protocol", protocol__field_descriptor)])
                              +
                              +cluster ::
                              +        forall f s t a b . (Lens.Labels.HasLens "cluster" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +cluster
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "cluster")
                              +
                              +defaultSessionConfig ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "defaultSessionConfig" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +defaultSessionConfig
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "defaultSessionConfig")
                              +
                              +jobName ::
                              +        forall f s t a b . (Lens.Labels.HasLens "jobName" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +jobName
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "jobName")
                              +
                              +maybe'cluster ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'cluster" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'cluster
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'cluster")
                              +
                              +maybe'defaultSessionConfig ::
                              +                           forall f s t a b .
                              +                             (Lens.Labels.HasLens "maybe'defaultSessionConfig" f s t a b) =>
                              +                             Lens.Family2.LensLike f s t a b
                              +maybe'defaultSessionConfig
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'defaultSessionConfig")
                              +
                              +protocol ::
                              +         forall f s t a b . (Lens.Labels.HasLens "protocol" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +protocol
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "protocol")
                              +
                              +taskIndex ::
                              +          forall f s t a b . (Lens.Labels.HasLens "taskIndex" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +taskIndex
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "taskIndex")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.Event.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.Event.html new file mode 100644 index 0000000..3e86199 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.Event.html @@ -0,0 +1,888 @@ +
                              {- This file was auto-generated from tensorflow/core/util/event.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Util.Event where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.Summary
                              +
                              +data Event = Event{_Event'wallTime :: !Prelude.Double,
                              +                   _Event'step :: !Data.Int.Int64,
                              +                   _Event'what :: !(Prelude.Maybe Event'What)}
                              +           deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data Event'What = Event'FileVersion !Data.Text.Text
                              +                | Event'GraphDef !Data.ByteString.ByteString
                              +                | Event'Summary !Proto.Tensorflow.Core.Framework.Summary.Summary
                              +                | Event'LogMessage !LogMessage
                              +                | Event'SessionLog !SessionLog
                              +                | Event'TaggedRunMetadata !TaggedRunMetadata
                              +                | Event'MetaGraphDef !Data.ByteString.ByteString
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "wallTime" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'wallTime
                              +                 (\ x__ y__ -> x__{_Event'wallTime = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "step" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'step
                              +                 (\ x__ y__ -> x__{_Event'step = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Event'What,
                              +          b ~ Prelude.Maybe Event'What, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'what" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Data.Text.Text,
                              +          b ~ Prelude.Maybe Data.Text.Text, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'fileVersion" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Event'FileVersion x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Event'FileVersion y__))
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "fileVersion" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Event'FileVersion x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Event'FileVersion y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe Data.ByteString.ByteString,
                              +          b ~ Prelude.Maybe Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'graphDef" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Event'GraphDef x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Event'GraphDef y__))
                              +
                              +instance (a ~ Data.ByteString.ByteString,
                              +          b ~ Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "graphDef" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Event'GraphDef x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Event'GraphDef y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Summary.Summary,
                              +          b ~ Prelude.Maybe Proto.Tensorflow.Core.Framework.Summary.Summary,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'summary" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Event'Summary x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Event'Summary y__))
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Summary.Summary,
                              +          b ~ Proto.Tensorflow.Core.Framework.Summary.Summary,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "summary" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Event'Summary x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Event'Summary y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe LogMessage,
                              +          b ~ Prelude.Maybe LogMessage, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'logMessage" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Event'LogMessage x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Event'LogMessage y__))
                              +
                              +instance (a ~ LogMessage, b ~ LogMessage, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "logMessage" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Event'LogMessage x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Event'LogMessage y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe SessionLog,
                              +          b ~ Prelude.Maybe SessionLog, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'sessionLog" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Event'SessionLog x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Event'SessionLog y__))
                              +
                              +instance (a ~ SessionLog, b ~ SessionLog, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "sessionLog" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Event'SessionLog x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Event'SessionLog y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe TaggedRunMetadata,
                              +          b ~ Prelude.Maybe TaggedRunMetadata, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'taggedRunMetadata" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Event'TaggedRunMetadata x__val) -> Prelude.Just
                              +                                                                           x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Event'TaggedRunMetadata y__))
                              +
                              +instance (a ~ TaggedRunMetadata, b ~ TaggedRunMetadata,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "taggedRunMetadata" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Event'TaggedRunMetadata x__val) -> Prelude.Just
                              +                                                                              x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Event'TaggedRunMetadata y__))
                              +                 (Data.ProtoLens.maybeLens Data.Default.Class.def))
                              +
                              +instance (a ~ Prelude.Maybe Data.ByteString.ByteString,
                              +          b ~ Prelude.Maybe Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'metaGraphDef" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (Event'MetaGraphDef x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap Event'MetaGraphDef y__))
                              +
                              +instance (a ~ Data.ByteString.ByteString,
                              +          b ~ Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "metaGraphDef" f Event Event a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _Event'what
                              +                 (\ x__ y__ -> x__{_Event'what = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (Event'MetaGraphDef x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap Event'MetaGraphDef y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance Data.Default.Class.Default Event where
                              +        def
                              +          = Event{_Event'wallTime = Data.ProtoLens.fieldDefault,
                              +                  _Event'step = Data.ProtoLens.fieldDefault,
                              +                  _Event'what = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message Event where
                              +        descriptor
                              +          = let wallTime__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "wall_time"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional wallTime)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +                step__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "step"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional step)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +                fileVersion__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "file_version"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.OptionalField maybe'fileVersion)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +                graphDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "graph_def"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.OptionalField maybe'graphDef)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +                summary__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "summary"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Summary.Summary)
                              +                      (Data.ProtoLens.OptionalField maybe'summary)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +                logMessage__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "log_message"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor LogMessage)
                              +                      (Data.ProtoLens.OptionalField maybe'logMessage)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +                sessionLog__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "session_log"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SessionLog)
                              +                      (Data.ProtoLens.OptionalField maybe'sessionLog)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +                taggedRunMetadata__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tagged_run_metadata"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor TaggedRunMetadata)
                              +                      (Data.ProtoLens.OptionalField maybe'taggedRunMetadata)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +                metaGraphDef__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "meta_graph_def"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.OptionalField maybe'metaGraphDef)
                              +                      :: Data.ProtoLens.FieldDescriptor Event
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.Event")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, wallTime__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, step__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, fileVersion__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, graphDef__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, summary__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, logMessage__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, sessionLog__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, taggedRunMetadata__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, metaGraphDef__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("wall_time", wallTime__field_descriptor),
                              +                    ("step", step__field_descriptor),
                              +                    ("file_version", fileVersion__field_descriptor),
                              +                    ("graph_def", graphDef__field_descriptor),
                              +                    ("summary", summary__field_descriptor),
                              +                    ("log_message", logMessage__field_descriptor),
                              +                    ("session_log", sessionLog__field_descriptor),
                              +                    ("tagged_run_metadata", taggedRunMetadata__field_descriptor),
                              +                    ("meta_graph_def", metaGraphDef__field_descriptor)])
                              +
                              +data LogMessage = LogMessage{_LogMessage'level ::
                              +                             !LogMessage'Level,
                              +                             _LogMessage'message :: !Data.Text.Text}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ LogMessage'Level, b ~ LogMessage'Level,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "level" f LogMessage LogMessage a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _LogMessage'level
                              +                 (\ x__ y__ -> x__{_LogMessage'level = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "message" f LogMessage LogMessage a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _LogMessage'message
                              +                 (\ x__ y__ -> x__{_LogMessage'message = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default LogMessage where
                              +        def
                              +          = LogMessage{_LogMessage'level = Data.Default.Class.def,
                              +                       _LogMessage'message = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message LogMessage where
                              +        descriptor
                              +          = let level__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "level"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor LogMessage'Level)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional level)
                              +                      :: Data.ProtoLens.FieldDescriptor LogMessage
                              +                message__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "message"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional message)
                              +                      :: Data.ProtoLens.FieldDescriptor LogMessage
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.LogMessage")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, level__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, message__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("level", level__field_descriptor),
                              +                    ("message", message__field_descriptor)])
                              +
                              +data LogMessage'Level = LogMessage'UNKNOWN
                              +                      | LogMessage'DEBUGGING
                              +                      | LogMessage'INFO
                              +                      | LogMessage'WARN
                              +                      | LogMessage'ERROR
                              +                      | LogMessage'FATAL
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default LogMessage'Level where
                              +        def = LogMessage'UNKNOWN
                              +
                              +instance Data.ProtoLens.FieldDefault LogMessage'Level where
                              +        fieldDefault = LogMessage'UNKNOWN
                              +
                              +instance Data.ProtoLens.MessageEnum LogMessage'Level where
                              +        maybeToEnum 0 = Prelude.Just LogMessage'UNKNOWN
                              +        maybeToEnum 10 = Prelude.Just LogMessage'DEBUGGING
                              +        maybeToEnum 20 = Prelude.Just LogMessage'INFO
                              +        maybeToEnum 30 = Prelude.Just LogMessage'WARN
                              +        maybeToEnum 40 = Prelude.Just LogMessage'ERROR
                              +        maybeToEnum 50 = Prelude.Just LogMessage'FATAL
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum LogMessage'UNKNOWN = "UNKNOWN"
                              +        showEnum LogMessage'DEBUGGING = "DEBUGGING"
                              +        showEnum LogMessage'INFO = "INFO"
                              +        showEnum LogMessage'WARN = "WARN"
                              +        showEnum LogMessage'ERROR = "ERROR"
                              +        showEnum LogMessage'FATAL = "FATAL"
                              +        readEnum "UNKNOWN" = Prelude.Just LogMessage'UNKNOWN
                              +        readEnum "DEBUGGING" = Prelude.Just LogMessage'DEBUGGING
                              +        readEnum "INFO" = Prelude.Just LogMessage'INFO
                              +        readEnum "WARN" = Prelude.Just LogMessage'WARN
                              +        readEnum "ERROR" = Prelude.Just LogMessage'ERROR
                              +        readEnum "FATAL" = Prelude.Just LogMessage'FATAL
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum LogMessage'Level where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum Level: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum LogMessage'UNKNOWN = 0
                              +        fromEnum LogMessage'DEBUGGING = 10
                              +        fromEnum LogMessage'INFO = 20
                              +        fromEnum LogMessage'WARN = 30
                              +        fromEnum LogMessage'ERROR = 40
                              +        fromEnum LogMessage'FATAL = 50
                              +        succ LogMessage'FATAL
                              +          = Prelude.error
                              +              "LogMessage'Level.succ: bad argument LogMessage'FATAL. This value would be out of bounds."
                              +        succ LogMessage'UNKNOWN = LogMessage'DEBUGGING
                              +        succ LogMessage'DEBUGGING = LogMessage'INFO
                              +        succ LogMessage'INFO = LogMessage'WARN
                              +        succ LogMessage'WARN = LogMessage'ERROR
                              +        succ LogMessage'ERROR = LogMessage'FATAL
                              +        pred LogMessage'UNKNOWN
                              +          = Prelude.error
                              +              "LogMessage'Level.pred: bad argument LogMessage'UNKNOWN. This value would be out of bounds."
                              +        pred LogMessage'DEBUGGING = LogMessage'UNKNOWN
                              +        pred LogMessage'INFO = LogMessage'DEBUGGING
                              +        pred LogMessage'WARN = LogMessage'INFO
                              +        pred LogMessage'ERROR = LogMessage'WARN
                              +        pred LogMessage'FATAL = LogMessage'ERROR
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded LogMessage'Level where
                              +        minBound = LogMessage'UNKNOWN
                              +        maxBound = LogMessage'FATAL
                              +
                              +data SessionLog = SessionLog{_SessionLog'status ::
                              +                             !SessionLog'SessionStatus,
                              +                             _SessionLog'checkpointPath :: !Data.Text.Text,
                              +                             _SessionLog'msg :: !Data.Text.Text}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ SessionLog'SessionStatus,
                              +          b ~ SessionLog'SessionStatus, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "status" f SessionLog SessionLog a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SessionLog'status
                              +                 (\ x__ y__ -> x__{_SessionLog'status = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "checkpointPath" f SessionLog SessionLog a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SessionLog'checkpointPath
                              +                 (\ x__ y__ -> x__{_SessionLog'checkpointPath = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "msg" f SessionLog SessionLog a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SessionLog'msg
                              +                 (\ x__ y__ -> x__{_SessionLog'msg = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SessionLog where
                              +        def
                              +          = SessionLog{_SessionLog'status = Data.Default.Class.def,
                              +                       _SessionLog'checkpointPath = Data.ProtoLens.fieldDefault,
                              +                       _SessionLog'msg = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message SessionLog where
                              +        descriptor
                              +          = let status__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "status"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SessionLog'SessionStatus)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional status)
                              +                      :: Data.ProtoLens.FieldDescriptor SessionLog
                              +                checkpointPath__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "checkpoint_path"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional checkpointPath)
                              +                      :: Data.ProtoLens.FieldDescriptor SessionLog
                              +                msg__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "msg"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional msg)
                              +                      :: Data.ProtoLens.FieldDescriptor SessionLog
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SessionLog")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, status__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, checkpointPath__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, msg__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("status", status__field_descriptor),
                              +                    ("checkpoint_path", checkpointPath__field_descriptor),
                              +                    ("msg", msg__field_descriptor)])
                              +
                              +data SessionLog'SessionStatus = SessionLog'STATUS_UNSPECIFIED
                              +                              | SessionLog'START
                              +                              | SessionLog'STOP
                              +                              | SessionLog'CHECKPOINT
                              +                              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default SessionLog'SessionStatus where
                              +        def = SessionLog'STATUS_UNSPECIFIED
                              +
                              +instance Data.ProtoLens.FieldDefault SessionLog'SessionStatus where
                              +        fieldDefault = SessionLog'STATUS_UNSPECIFIED
                              +
                              +instance Data.ProtoLens.MessageEnum SessionLog'SessionStatus where
                              +        maybeToEnum 0 = Prelude.Just SessionLog'STATUS_UNSPECIFIED
                              +        maybeToEnum 1 = Prelude.Just SessionLog'START
                              +        maybeToEnum 2 = Prelude.Just SessionLog'STOP
                              +        maybeToEnum 3 = Prelude.Just SessionLog'CHECKPOINT
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum SessionLog'STATUS_UNSPECIFIED = "STATUS_UNSPECIFIED"
                              +        showEnum SessionLog'START = "START"
                              +        showEnum SessionLog'STOP = "STOP"
                              +        showEnum SessionLog'CHECKPOINT = "CHECKPOINT"
                              +        readEnum "STATUS_UNSPECIFIED"
                              +          = Prelude.Just SessionLog'STATUS_UNSPECIFIED
                              +        readEnum "START" = Prelude.Just SessionLog'START
                              +        readEnum "STOP" = Prelude.Just SessionLog'STOP
                              +        readEnum "CHECKPOINT" = Prelude.Just SessionLog'CHECKPOINT
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum SessionLog'SessionStatus where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum SessionStatus: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum SessionLog'STATUS_UNSPECIFIED = 0
                              +        fromEnum SessionLog'START = 1
                              +        fromEnum SessionLog'STOP = 2
                              +        fromEnum SessionLog'CHECKPOINT = 3
                              +        succ SessionLog'CHECKPOINT
                              +          = Prelude.error
                              +              "SessionLog'SessionStatus.succ: bad argument SessionLog'CHECKPOINT. This value would be out of bounds."
                              +        succ SessionLog'STATUS_UNSPECIFIED = SessionLog'START
                              +        succ SessionLog'START = SessionLog'STOP
                              +        succ SessionLog'STOP = SessionLog'CHECKPOINT
                              +        pred SessionLog'STATUS_UNSPECIFIED
                              +          = Prelude.error
                              +              "SessionLog'SessionStatus.pred: bad argument SessionLog'STATUS_UNSPECIFIED. This value would be out of bounds."
                              +        pred SessionLog'START = SessionLog'STATUS_UNSPECIFIED
                              +        pred SessionLog'STOP = SessionLog'START
                              +        pred SessionLog'CHECKPOINT = SessionLog'STOP
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded SessionLog'SessionStatus where
                              +        minBound = SessionLog'STATUS_UNSPECIFIED
                              +        maxBound = SessionLog'CHECKPOINT
                              +
                              +data TaggedRunMetadata = TaggedRunMetadata{_TaggedRunMetadata'tag
                              +                                           :: !Data.Text.Text,
                              +                                           _TaggedRunMetadata'runMetadata ::
                              +                                           !Data.ByteString.ByteString}
                              +                       deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tag" f TaggedRunMetadata TaggedRunMetadata a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TaggedRunMetadata'tag
                              +                 (\ x__ y__ -> x__{_TaggedRunMetadata'tag = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.ByteString.ByteString,
                              +          b ~ Data.ByteString.ByteString, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "runMetadata" f TaggedRunMetadata
                              +           TaggedRunMetadata
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TaggedRunMetadata'runMetadata
                              +                 (\ x__ y__ -> x__{_TaggedRunMetadata'runMetadata = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default TaggedRunMetadata where
                              +        def
                              +          = TaggedRunMetadata{_TaggedRunMetadata'tag =
                              +                                Data.ProtoLens.fieldDefault,
                              +                              _TaggedRunMetadata'runMetadata = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message TaggedRunMetadata where
                              +        descriptor
                              +          = let tag__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tag"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional tag)
                              +                      :: Data.ProtoLens.FieldDescriptor TaggedRunMetadata
                              +                runMetadata__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "run_metadata"
                              +                      (Data.ProtoLens.BytesField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional runMetadata)
                              +                      :: Data.ProtoLens.FieldDescriptor TaggedRunMetadata
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TaggedRunMetadata")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, tag__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, runMetadata__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("tag", tag__field_descriptor),
                              +                    ("run_metadata", runMetadata__field_descriptor)])
                              +
                              +checkpointPath ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "checkpointPath" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +checkpointPath
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "checkpointPath")
                              +
                              +fileVersion ::
                              +            forall f s t a b . (Lens.Labels.HasLens "fileVersion" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +fileVersion
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "fileVersion")
                              +
                              +graphDef ::
                              +         forall f s t a b . (Lens.Labels.HasLens "graphDef" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +graphDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "graphDef")
                              +
                              +level ::
                              +      forall f s t a b . (Lens.Labels.HasLens "level" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +level
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "level")
                              +
                              +logMessage ::
                              +           forall f s t a b . (Lens.Labels.HasLens "logMessage" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +logMessage
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "logMessage")
                              +
                              +maybe'fileVersion ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'fileVersion" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'fileVersion
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'fileVersion")
                              +
                              +maybe'graphDef ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'graphDef" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'graphDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'graphDef")
                              +
                              +maybe'logMessage ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'logMessage" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'logMessage
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'logMessage")
                              +
                              +maybe'metaGraphDef ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "maybe'metaGraphDef" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +maybe'metaGraphDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'metaGraphDef")
                              +
                              +maybe'sessionLog ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'sessionLog" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'sessionLog
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'sessionLog")
                              +
                              +maybe'summary ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'summary" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'summary
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'summary")
                              +
                              +maybe'taggedRunMetadata ::
                              +                        forall f s t a b .
                              +                          (Lens.Labels.HasLens "maybe'taggedRunMetadata" f s t a b) =>
                              +                          Lens.Family2.LensLike f s t a b
                              +maybe'taggedRunMetadata
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'taggedRunMetadata")
                              +
                              +maybe'what ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'what" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'what
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'what")
                              +
                              +message ::
                              +        forall f s t a b . (Lens.Labels.HasLens "message" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +message
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "message")
                              +
                              +metaGraphDef ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "metaGraphDef" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +metaGraphDef
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "metaGraphDef")
                              +
                              +msg ::
                              +    forall f s t a b . (Lens.Labels.HasLens "msg" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +msg
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "msg")
                              +
                              +runMetadata ::
                              +            forall f s t a b . (Lens.Labels.HasLens "runMetadata" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +runMetadata
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "runMetadata")
                              +
                              +sessionLog ::
                              +           forall f s t a b . (Lens.Labels.HasLens "sessionLog" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +sessionLog
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "sessionLog")
                              +
                              +status ::
                              +       forall f s t a b . (Lens.Labels.HasLens "status" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +status
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "status")
                              +
                              +step ::
                              +     forall f s t a b . (Lens.Labels.HasLens "step" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +step
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "step")
                              +
                              +summary ::
                              +        forall f s t a b . (Lens.Labels.HasLens "summary" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +summary
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "summary")
                              +
                              +tag ::
                              +    forall f s t a b . (Lens.Labels.HasLens "tag" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +tag
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tag")
                              +
                              +taggedRunMetadata ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "taggedRunMetadata" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +taggedRunMetadata
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "taggedRunMetadata")
                              +
                              +wallTime ::
                              +         forall f s t a b . (Lens.Labels.HasLens "wallTime" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +wallTime
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "wallTime")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.MemmappedFileSystem.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.MemmappedFileSystem.html new file mode 100644 index 0000000..d1852f9 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.MemmappedFileSystem.html @@ -0,0 +1,159 @@ +
                              {- This file was auto-generated from tensorflow/core/util/memmapped_file_system.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Util.MemmappedFileSystem where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +
                              +data MemmappedFileSystemDirectory = MemmappedFileSystemDirectory{_MemmappedFileSystemDirectory'element
                              +                                                                 ::
                              +                                                                 ![MemmappedFileSystemDirectoryElement]}
                              +                                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [MemmappedFileSystemDirectoryElement],
                              +          b ~ [MemmappedFileSystemDirectoryElement], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "element" f MemmappedFileSystemDirectory
                              +           MemmappedFileSystemDirectory
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemmappedFileSystemDirectory'element
                              +                 (\ x__ y__ -> x__{_MemmappedFileSystemDirectory'element = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemmappedFileSystemDirectory
                              +         where
                              +        def
                              +          = MemmappedFileSystemDirectory{_MemmappedFileSystemDirectory'element
                              +                                           = []}
                              +
                              +instance Data.ProtoLens.Message MemmappedFileSystemDirectory where
                              +        descriptor
                              +          = let element__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "element"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           MemmappedFileSystemDirectoryElement)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked element)
                              +                      :: Data.ProtoLens.FieldDescriptor MemmappedFileSystemDirectory
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemmappedFileSystemDirectory")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, element__field_descriptor)])
                              +                (Data.Map.fromList [("element", element__field_descriptor)])
                              +
                              +data MemmappedFileSystemDirectoryElement = MemmappedFileSystemDirectoryElement{_MemmappedFileSystemDirectoryElement'offset
                              +                                                                               :: !Data.Word.Word64,
                              +                                                                               _MemmappedFileSystemDirectoryElement'name
                              +                                                                               :: !Data.Text.Text}
                              +                                         deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Word.Word64, b ~ Data.Word.Word64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "offset" f MemmappedFileSystemDirectoryElement
                              +           MemmappedFileSystemDirectoryElement
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MemmappedFileSystemDirectoryElement'offset
                              +                 (\ x__ y__ ->
                              +                    x__{_MemmappedFileSystemDirectoryElement'offset = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f MemmappedFileSystemDirectoryElement
                              +           MemmappedFileSystemDirectoryElement
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MemmappedFileSystemDirectoryElement'name
                              +                 (\ x__ y__ ->
                              +                    x__{_MemmappedFileSystemDirectoryElement'name = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default
                              +           MemmappedFileSystemDirectoryElement
                              +         where
                              +        def
                              +          = MemmappedFileSystemDirectoryElement{_MemmappedFileSystemDirectoryElement'offset
                              +                                                  = Data.ProtoLens.fieldDefault,
                              +                                                _MemmappedFileSystemDirectoryElement'name =
                              +                                                  Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message MemmappedFileSystemDirectoryElement
                              +         where
                              +        descriptor
                              +          = let offset__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "offset"
                              +                      (Data.ProtoLens.UInt64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional offset)
                              +                      ::
                              +                      Data.ProtoLens.FieldDescriptor MemmappedFileSystemDirectoryElement
                              +                name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      ::
                              +                      Data.ProtoLens.FieldDescriptor MemmappedFileSystemDirectoryElement
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemmappedFileSystemDirectoryElement")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, offset__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, name__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("offset", offset__field_descriptor),
                              +                    ("name", name__field_descriptor)])
                              +
                              +element ::
                              +        forall f s t a b . (Lens.Labels.HasLens "element" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +element
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "element")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +offset ::
                              +       forall f s t a b . (Lens.Labels.HasLens "offset" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +offset
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "offset")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.SavedTensorSlice.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.SavedTensorSlice.html new file mode 100644 index 0000000..d442139 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.SavedTensorSlice.html @@ -0,0 +1,517 @@ +
                              {- This file was auto-generated from tensorflow/core/util/saved_tensor_slice.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Util.SavedTensorSlice where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Tensorflow.Core.Framework.Tensor
                              +import qualified Proto.Tensorflow.Core.Framework.TensorShape
                              +import qualified Proto.Tensorflow.Core.Framework.TensorSlice
                              +import qualified Proto.Tensorflow.Core.Framework.Types
                              +import qualified Proto.Tensorflow.Core.Framework.Versions
                              +
                              +data SavedSlice = SavedSlice{_SavedSlice'name :: !Data.Text.Text,
                              +                             _SavedSlice'slice ::
                              +                             !(Prelude.Maybe
                              +                                 Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto),
                              +                             _SavedSlice'data' ::
                              +                             !(Prelude.Maybe
                              +                                 Proto.Tensorflow.Core.Framework.Tensor.TensorProto)}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f SavedSlice SavedSlice a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSlice'name
                              +                 (\ x__ y__ -> x__{_SavedSlice'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "slice" f SavedSlice SavedSlice a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSlice'slice
                              +                 (\ x__ y__ -> x__{_SavedSlice'slice = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'slice" f SavedSlice SavedSlice a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSlice'slice
                              +                 (\ x__ y__ -> x__{_SavedSlice'slice = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "data'" f SavedSlice SavedSlice a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSlice'data'
                              +                 (\ x__ y__ -> x__{_SavedSlice'data' = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'data'" f SavedSlice SavedSlice a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSlice'data'
                              +                 (\ x__ y__ -> x__{_SavedSlice'data' = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SavedSlice where
                              +        def
                              +          = SavedSlice{_SavedSlice'name = Data.ProtoLens.fieldDefault,
                              +                       _SavedSlice'slice = Prelude.Nothing,
                              +                       _SavedSlice'data' = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message SavedSlice where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedSlice
                              +                slice__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "slice"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto)
                              +                      (Data.ProtoLens.OptionalField maybe'slice)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedSlice
                              +                data'__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "data"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
                              +                      (Data.ProtoLens.OptionalField maybe'data')
                              +                      :: Data.ProtoLens.FieldDescriptor SavedSlice
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SavedSlice")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, slice__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, data'__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("slice", slice__field_descriptor),
                              +                    ("data", data'__field_descriptor)])
                              +
                              +data SavedSliceMeta = SavedSliceMeta{_SavedSliceMeta'name ::
                              +                                     !Data.Text.Text,
                              +                                     _SavedSliceMeta'shape ::
                              +                                     !(Prelude.Maybe
                              +                                         Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto),
                              +                                     _SavedSliceMeta'type' ::
                              +                                     !Proto.Tensorflow.Core.Framework.Types.DataType,
                              +                                     _SavedSliceMeta'slice ::
                              +                                     ![Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto]}
                              +                    deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f SavedSliceMeta SavedSliceMeta a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSliceMeta'name
                              +                 (\ x__ y__ -> x__{_SavedSliceMeta'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~ Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "shape" f SavedSliceMeta SavedSliceMeta a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSliceMeta'shape
                              +                 (\ x__ y__ -> x__{_SavedSliceMeta'shape = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          b ~
                              +            Prelude.Maybe
                              +              Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'shape" f SavedSliceMeta SavedSliceMeta a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSliceMeta'shape
                              +                 (\ x__ y__ -> x__{_SavedSliceMeta'shape = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          b ~ Proto.Tensorflow.Core.Framework.Types.DataType,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "type'" f SavedSliceMeta SavedSliceMeta a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSliceMeta'type'
                              +                 (\ x__ y__ -> x__{_SavedSliceMeta'type' = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~
                              +            [Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto],
                              +          b ~ [Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "slice" f SavedSliceMeta SavedSliceMeta a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedSliceMeta'slice
                              +                 (\ x__ y__ -> x__{_SavedSliceMeta'slice = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SavedSliceMeta where
                              +        def
                              +          = SavedSliceMeta{_SavedSliceMeta'name =
                              +                             Data.ProtoLens.fieldDefault,
                              +                           _SavedSliceMeta'shape = Prelude.Nothing,
                              +                           _SavedSliceMeta'type' = Data.Default.Class.def,
                              +                           _SavedSliceMeta'slice = []}
                              +
                              +instance Data.ProtoLens.Message SavedSliceMeta where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedSliceMeta
                              +                shape__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "shape"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
                              +                      (Data.ProtoLens.OptionalField maybe'shape)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedSliceMeta
                              +                type'__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Types.DataType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional type')
                              +                      :: Data.ProtoLens.FieldDescriptor SavedSliceMeta
                              +                slice__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "slice"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.TensorSlice.TensorSliceProto)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked slice)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedSliceMeta
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SavedSliceMeta")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, shape__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, type'__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, slice__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("shape", shape__field_descriptor),
                              +                    ("type", type'__field_descriptor),
                              +                    ("slice", slice__field_descriptor)])
                              +
                              +data SavedTensorSliceMeta = SavedTensorSliceMeta{_SavedTensorSliceMeta'tensor
                              +                                                 :: ![SavedSliceMeta],
                              +                                                 _SavedTensorSliceMeta'versions ::
                              +                                                 !(Prelude.Maybe
                              +                                                     Proto.Tensorflow.Core.Framework.Versions.VersionDef)}
                              +                          deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [SavedSliceMeta], b ~ [SavedSliceMeta],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "tensor" f SavedTensorSliceMeta
                              +           SavedTensorSliceMeta
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedTensorSliceMeta'tensor
                              +                 (\ x__ y__ -> x__{_SavedTensorSliceMeta'tensor = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          b ~ Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "versions" f SavedTensorSliceMeta
                              +           SavedTensorSliceMeta
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedTensorSliceMeta'versions
                              +                 (\ x__ y__ -> x__{_SavedTensorSliceMeta'versions = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          b ~
                              +            Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'versions" f SavedTensorSliceMeta
                              +           SavedTensorSliceMeta
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedTensorSliceMeta'versions
                              +                 (\ x__ y__ -> x__{_SavedTensorSliceMeta'versions = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SavedTensorSliceMeta where
                              +        def
                              +          = SavedTensorSliceMeta{_SavedTensorSliceMeta'tensor = [],
                              +                                 _SavedTensorSliceMeta'versions = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message SavedTensorSliceMeta where
                              +        descriptor
                              +          = let tensor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "tensor"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SavedSliceMeta)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked tensor)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedTensorSliceMeta
                              +                versions__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "versions"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor
                              +                           Proto.Tensorflow.Core.Framework.Versions.VersionDef)
                              +                      (Data.ProtoLens.OptionalField maybe'versions)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedTensorSliceMeta
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SavedTensorSliceMeta")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, tensor__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, versions__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("tensor", tensor__field_descriptor),
                              +                    ("versions", versions__field_descriptor)])
                              +
                              +data SavedTensorSlices = SavedTensorSlices{_SavedTensorSlices'meta
                              +                                           :: !(Prelude.Maybe SavedTensorSliceMeta),
                              +                                           _SavedTensorSlices'data' :: !(Prelude.Maybe SavedSlice)}
                              +                       deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ SavedTensorSliceMeta, b ~ SavedTensorSliceMeta,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "meta" f SavedTensorSlices SavedTensorSlices a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedTensorSlices'meta
                              +                 (\ x__ y__ -> x__{_SavedTensorSlices'meta = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe SavedTensorSliceMeta,
                              +          b ~ Prelude.Maybe SavedTensorSliceMeta, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'meta" f SavedTensorSlices
                              +           SavedTensorSlices
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedTensorSlices'meta
                              +                 (\ x__ y__ -> x__{_SavedTensorSlices'meta = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ SavedSlice, b ~ SavedSlice, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "data'" f SavedTensorSlices SavedTensorSlices a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedTensorSlices'data'
                              +                 (\ x__ y__ -> x__{_SavedTensorSlices'data' = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe SavedSlice,
                              +          b ~ Prelude.Maybe SavedSlice, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'data'" f SavedTensorSlices
                              +           SavedTensorSlices
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _SavedTensorSlices'data'
                              +                 (\ x__ y__ -> x__{_SavedTensorSlices'data' = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default SavedTensorSlices where
                              +        def
                              +          = SavedTensorSlices{_SavedTensorSlices'meta = Prelude.Nothing,
                              +                              _SavedTensorSlices'data' = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message SavedTensorSlices where
                              +        descriptor
                              +          = let meta__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "meta"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SavedTensorSliceMeta)
                              +                      (Data.ProtoLens.OptionalField maybe'meta)
                              +                      :: Data.ProtoLens.FieldDescriptor SavedTensorSlices
                              +                data'__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "data"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor SavedSlice)
                              +                      (Data.ProtoLens.OptionalField maybe'data')
                              +                      :: Data.ProtoLens.FieldDescriptor SavedTensorSlices
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.SavedTensorSlices")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, meta__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, data'__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("meta", meta__field_descriptor),
                              +                    ("data", data'__field_descriptor)])
                              +
                              +data' ::
                              +      forall f s t a b . (Lens.Labels.HasLens "data'" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +data'
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "data'")
                              +
                              +maybe'data' ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'data'" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'data'
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'data'")
                              +
                              +maybe'meta ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'meta" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'meta
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'meta")
                              +
                              +maybe'shape ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'shape" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'shape")
                              +
                              +maybe'slice ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'slice" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'slice
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'slice")
                              +
                              +maybe'versions ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'versions" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'versions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'versions")
                              +
                              +meta ::
                              +     forall f s t a b . (Lens.Labels.HasLens "meta" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +meta
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "meta")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +shape ::
                              +      forall f s t a b . (Lens.Labels.HasLens "shape" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +shape
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "shape")
                              +
                              +slice ::
                              +      forall f s t a b . (Lens.Labels.HasLens "slice" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +slice
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "slice")
                              +
                              +tensor ::
                              +       forall f s t a b . (Lens.Labels.HasLens "tensor" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +tensor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "tensor")
                              +
                              +type' ::
                              +      forall f s t a b . (Lens.Labels.HasLens "type'" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +type'
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "type'")
                              +
                              +versions ::
                              +         forall f s t a b . (Lens.Labels.HasLens "versions" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +versions
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "versions")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.TestLog.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.TestLog.html new file mode 100644 index 0000000..2892ec7 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto.Tensorflow.Core.Util.TestLog.html @@ -0,0 +1,2282 @@ +
                              {- This file was auto-generated from tensorflow/core/util/test_log.proto by the proto-lens-protoc program. -}
                              +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
                              +  UndecidableInstances, MultiParamTypeClasses, FlexibleContexts,
                              +  FlexibleInstances, PatternSynonyms, MagicHash, NoImplicitPrelude
                              +  #-}
                              +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
                              +module Proto.Tensorflow.Core.Util.TestLog where
                              +import qualified Data.ProtoLens.Reexport.Prelude as Prelude
                              +import qualified Data.ProtoLens.Reexport.Data.Int as Data.Int
                              +import qualified Data.ProtoLens.Reexport.Data.Word as Data.Word
                              +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
                              +       as Data.ProtoLens
                              +import qualified
                              +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
                              +       as Data.ProtoLens.Message.Enum
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2
                              +       as Lens.Family2
                              +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
                              +       as Lens.Family2.Unchecked
                              +import qualified Data.ProtoLens.Reexport.Data.Default.Class
                              +       as Data.Default.Class
                              +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
                              +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
                              +import qualified Data.ProtoLens.Reexport.Data.ByteString
                              +       as Data.ByteString
                              +import qualified Data.ProtoLens.Reexport.Lens.Labels as Lens.Labels
                              +import qualified Proto.Google.Protobuf.Any
                              +
                              +data AvailableDeviceInfo = AvailableDeviceInfo{_AvailableDeviceInfo'name
                              +                                               :: !Data.Text.Text,
                              +                                               _AvailableDeviceInfo'type' :: !Data.Text.Text,
                              +                                               _AvailableDeviceInfo'memoryLimit :: !Data.Int.Int64,
                              +                                               _AvailableDeviceInfo'physicalDescription ::
                              +                                               !Data.Text.Text}
                              +                         deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f AvailableDeviceInfo
                              +           AvailableDeviceInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AvailableDeviceInfo'name
                              +                 (\ x__ y__ -> x__{_AvailableDeviceInfo'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "type'" f AvailableDeviceInfo
                              +           AvailableDeviceInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AvailableDeviceInfo'type'
                              +                 (\ x__ y__ -> x__{_AvailableDeviceInfo'type' = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "memoryLimit" f AvailableDeviceInfo
                              +           AvailableDeviceInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _AvailableDeviceInfo'memoryLimit
                              +                 (\ x__ y__ -> x__{_AvailableDeviceInfo'memoryLimit = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "physicalDescription" f AvailableDeviceInfo
                              +           AvailableDeviceInfo
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _AvailableDeviceInfo'physicalDescription
                              +                 (\ x__ y__ -> x__{_AvailableDeviceInfo'physicalDescription = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default AvailableDeviceInfo where
                              +        def
                              +          = AvailableDeviceInfo{_AvailableDeviceInfo'name =
                              +                                  Data.ProtoLens.fieldDefault,
                              +                                _AvailableDeviceInfo'type' = Data.ProtoLens.fieldDefault,
                              +                                _AvailableDeviceInfo'memoryLimit = Data.ProtoLens.fieldDefault,
                              +                                _AvailableDeviceInfo'physicalDescription =
                              +                                  Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message AvailableDeviceInfo where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor AvailableDeviceInfo
                              +                type'__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "type"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional type')
                              +                      :: Data.ProtoLens.FieldDescriptor AvailableDeviceInfo
                              +                memoryLimit__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "memory_limit"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional memoryLimit)
                              +                      :: Data.ProtoLens.FieldDescriptor AvailableDeviceInfo
                              +                physicalDescription__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "physical_description"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         physicalDescription)
                              +                      :: Data.ProtoLens.FieldDescriptor AvailableDeviceInfo
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.AvailableDeviceInfo")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, type'__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, memoryLimit__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, physicalDescription__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("type", type'__field_descriptor),
                              +                    ("memory_limit", memoryLimit__field_descriptor),
                              +                    ("physical_description", physicalDescription__field_descriptor)])
                              +
                              +data BenchmarkEntries = BenchmarkEntries{_BenchmarkEntries'entry ::
                              +                                         ![BenchmarkEntry]}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [BenchmarkEntry], b ~ [BenchmarkEntry],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "entry" f BenchmarkEntries BenchmarkEntries a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntries'entry
                              +                 (\ x__ y__ -> x__{_BenchmarkEntries'entry = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default BenchmarkEntries where
                              +        def = BenchmarkEntries{_BenchmarkEntries'entry = []}
                              +
                              +instance Data.ProtoLens.Message BenchmarkEntries where
                              +        descriptor
                              +          = let entry__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "entry"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor BenchmarkEntry)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked entry)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntries
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.BenchmarkEntries")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, entry__field_descriptor)])
                              +                (Data.Map.fromList [("entry", entry__field_descriptor)])
                              +
                              +data BenchmarkEntry = BenchmarkEntry{_BenchmarkEntry'name ::
                              +                                     !Data.Text.Text,
                              +                                     _BenchmarkEntry'iters :: !Data.Int.Int64,
                              +                                     _BenchmarkEntry'cpuTime :: !Prelude.Double,
                              +                                     _BenchmarkEntry'wallTime :: !Prelude.Double,
                              +                                     _BenchmarkEntry'throughput :: !Prelude.Double,
                              +                                     _BenchmarkEntry'extras ::
                              +                                     !(Data.Map.Map Data.Text.Text EntryValue)}
                              +                    deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f BenchmarkEntry BenchmarkEntry a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'name
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "iters" f BenchmarkEntry BenchmarkEntry a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'iters
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'iters = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "cpuTime" f BenchmarkEntry BenchmarkEntry a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'cpuTime
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'cpuTime = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "wallTime" f BenchmarkEntry BenchmarkEntry a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'wallTime
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'wallTime = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "throughput" f BenchmarkEntry BenchmarkEntry a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'throughput
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'throughput = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text EntryValue,
                              +          b ~ Data.Map.Map Data.Text.Text EntryValue, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "extras" f BenchmarkEntry BenchmarkEntry a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'extras
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'extras = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default BenchmarkEntry where
                              +        def
                              +          = BenchmarkEntry{_BenchmarkEntry'name =
                              +                             Data.ProtoLens.fieldDefault,
                              +                           _BenchmarkEntry'iters = Data.ProtoLens.fieldDefault,
                              +                           _BenchmarkEntry'cpuTime = Data.ProtoLens.fieldDefault,
                              +                           _BenchmarkEntry'wallTime = Data.ProtoLens.fieldDefault,
                              +                           _BenchmarkEntry'throughput = Data.ProtoLens.fieldDefault,
                              +                           _BenchmarkEntry'extras = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message BenchmarkEntry where
                              +        descriptor
                              +          = let name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntry
                              +                iters__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "iters"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional iters)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntry
                              +                cpuTime__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cpu_time"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional cpuTime)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntry
                              +                wallTime__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "wall_time"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional wallTime)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntry
                              +                throughput__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "throughput"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional throughput)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntry
                              +                extras__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "extras"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor BenchmarkEntry'ExtrasEntry)
                              +                      (Data.ProtoLens.MapField key value extras)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.BenchmarkEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, iters__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, cpuTime__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, wallTime__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, throughput__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, extras__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("name", name__field_descriptor),
                              +                    ("iters", iters__field_descriptor),
                              +                    ("cpu_time", cpuTime__field_descriptor),
                              +                    ("wall_time", wallTime__field_descriptor),
                              +                    ("throughput", throughput__field_descriptor),
                              +                    ("extras", extras__field_descriptor)])
                              +
                              +data BenchmarkEntry'ExtrasEntry = BenchmarkEntry'ExtrasEntry{_BenchmarkEntry'ExtrasEntry'key
                              +                                                             :: !Data.Text.Text,
                              +                                                             _BenchmarkEntry'ExtrasEntry'value ::
                              +                                                             !(Prelude.Maybe EntryValue)}
                              +                                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f BenchmarkEntry'ExtrasEntry
                              +           BenchmarkEntry'ExtrasEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'ExtrasEntry'key
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'ExtrasEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ EntryValue, b ~ EntryValue, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f BenchmarkEntry'ExtrasEntry
                              +           BenchmarkEntry'ExtrasEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'ExtrasEntry'value
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'ExtrasEntry'value = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe EntryValue,
                              +          b ~ Prelude.Maybe EntryValue, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'value" f BenchmarkEntry'ExtrasEntry
                              +           BenchmarkEntry'ExtrasEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BenchmarkEntry'ExtrasEntry'value
                              +                 (\ x__ y__ -> x__{_BenchmarkEntry'ExtrasEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default BenchmarkEntry'ExtrasEntry
                              +         where
                              +        def
                              +          = BenchmarkEntry'ExtrasEntry{_BenchmarkEntry'ExtrasEntry'key =
                              +                                         Data.ProtoLens.fieldDefault,
                              +                                       _BenchmarkEntry'ExtrasEntry'value = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message BenchmarkEntry'ExtrasEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntry'ExtrasEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor EntryValue)
                              +                      (Data.ProtoLens.OptionalField maybe'value)
                              +                      :: Data.ProtoLens.FieldDescriptor BenchmarkEntry'ExtrasEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.BenchmarkEntry.ExtrasEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data BuildConfiguration = BuildConfiguration{_BuildConfiguration'mode
                              +                                             :: !Data.Text.Text,
                              +                                             _BuildConfiguration'ccFlags :: ![Data.Text.Text],
                              +                                             _BuildConfiguration'opts :: ![Data.Text.Text]}
                              +                        deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "mode" f BuildConfiguration BuildConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BuildConfiguration'mode
                              +                 (\ x__ y__ -> x__{_BuildConfiguration'mode = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "ccFlags" f BuildConfiguration
                              +           BuildConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BuildConfiguration'ccFlags
                              +                 (\ x__ y__ -> x__{_BuildConfiguration'ccFlags = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "opts" f BuildConfiguration BuildConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _BuildConfiguration'opts
                              +                 (\ x__ y__ -> x__{_BuildConfiguration'opts = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default BuildConfiguration where
                              +        def
                              +          = BuildConfiguration{_BuildConfiguration'mode =
                              +                                 Data.ProtoLens.fieldDefault,
                              +                               _BuildConfiguration'ccFlags = [], _BuildConfiguration'opts = []}
                              +
                              +instance Data.ProtoLens.Message BuildConfiguration where
                              +        descriptor
                              +          = let mode__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "mode"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional mode)
                              +                      :: Data.ProtoLens.FieldDescriptor BuildConfiguration
                              +                ccFlags__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cc_flags"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked ccFlags)
                              +                      :: Data.ProtoLens.FieldDescriptor BuildConfiguration
                              +                opts__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "opts"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked opts)
                              +                      :: Data.ProtoLens.FieldDescriptor BuildConfiguration
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.BuildConfiguration")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, mode__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, ccFlags__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, opts__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("mode", mode__field_descriptor),
                              +                    ("cc_flags", ccFlags__field_descriptor),
                              +                    ("opts", opts__field_descriptor)])
                              +
                              +data CPUInfo = CPUInfo{_CPUInfo'numCores :: !Data.Int.Int64,
                              +                       _CPUInfo'numCoresAllowed :: !Data.Int.Int64,
                              +                       _CPUInfo'mhzPerCpu :: !Prelude.Double,
                              +                       _CPUInfo'cpuInfo :: !Data.Text.Text,
                              +                       _CPUInfo'cpuGovernor :: !Data.Text.Text,
                              +                       _CPUInfo'cacheSize ::
                              +                       !(Data.Map.Map Data.Text.Text Data.Int.Int64)}
                              +             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "numCores" f CPUInfo CPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CPUInfo'numCores
                              +                 (\ x__ y__ -> x__{_CPUInfo'numCores = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "numCoresAllowed" f CPUInfo CPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CPUInfo'numCoresAllowed
                              +                 (\ x__ y__ -> x__{_CPUInfo'numCoresAllowed = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "mhzPerCpu" f CPUInfo CPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CPUInfo'mhzPerCpu
                              +                 (\ x__ y__ -> x__{_CPUInfo'mhzPerCpu = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "cpuInfo" f CPUInfo CPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CPUInfo'cpuInfo
                              +                 (\ x__ y__ -> x__{_CPUInfo'cpuInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "cpuGovernor" f CPUInfo CPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CPUInfo'cpuGovernor
                              +                 (\ x__ y__ -> x__{_CPUInfo'cpuGovernor = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Map.Map Data.Text.Text Data.Int.Int64,
                              +          b ~ Data.Map.Map Data.Text.Text Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "cacheSize" f CPUInfo CPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CPUInfo'cacheSize
                              +                 (\ x__ y__ -> x__{_CPUInfo'cacheSize = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CPUInfo where
                              +        def
                              +          = CPUInfo{_CPUInfo'numCores = Data.ProtoLens.fieldDefault,
                              +                    _CPUInfo'numCoresAllowed = Data.ProtoLens.fieldDefault,
                              +                    _CPUInfo'mhzPerCpu = Data.ProtoLens.fieldDefault,
                              +                    _CPUInfo'cpuInfo = Data.ProtoLens.fieldDefault,
                              +                    _CPUInfo'cpuGovernor = Data.ProtoLens.fieldDefault,
                              +                    _CPUInfo'cacheSize = Data.Map.empty}
                              +
                              +instance Data.ProtoLens.Message CPUInfo where
                              +        descriptor
                              +          = let numCores__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "num_cores"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numCores)
                              +                      :: Data.ProtoLens.FieldDescriptor CPUInfo
                              +                numCoresAllowed__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "num_cores_allowed"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numCoresAllowed)
                              +                      :: Data.ProtoLens.FieldDescriptor CPUInfo
                              +                mhzPerCpu__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "mhz_per_cpu"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional mhzPerCpu)
                              +                      :: Data.ProtoLens.FieldDescriptor CPUInfo
                              +                cpuInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cpu_info"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional cpuInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor CPUInfo
                              +                cpuGovernor__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cpu_governor"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional cpuGovernor)
                              +                      :: Data.ProtoLens.FieldDescriptor CPUInfo
                              +                cacheSize__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cache_size"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CPUInfo'CacheSizeEntry)
                              +                      (Data.ProtoLens.MapField key value cacheSize)
                              +                      :: Data.ProtoLens.FieldDescriptor CPUInfo
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CPUInfo")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, numCores__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, numCoresAllowed__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, mhzPerCpu__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, cpuInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, cpuGovernor__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, cacheSize__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("num_cores", numCores__field_descriptor),
                              +                    ("num_cores_allowed", numCoresAllowed__field_descriptor),
                              +                    ("mhz_per_cpu", mhzPerCpu__field_descriptor),
                              +                    ("cpu_info", cpuInfo__field_descriptor),
                              +                    ("cpu_governor", cpuGovernor__field_descriptor),
                              +                    ("cache_size", cacheSize__field_descriptor)])
                              +
                              +data CPUInfo'CacheSizeEntry = CPUInfo'CacheSizeEntry{_CPUInfo'CacheSizeEntry'key
                              +                                                     :: !Data.Text.Text,
                              +                                                     _CPUInfo'CacheSizeEntry'value ::
                              +                                                     !Data.Int.Int64}
                              +                            deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "key" f CPUInfo'CacheSizeEntry
                              +           CPUInfo'CacheSizeEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CPUInfo'CacheSizeEntry'key
                              +                 (\ x__ y__ -> x__{_CPUInfo'CacheSizeEntry'key = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "value" f CPUInfo'CacheSizeEntry
                              +           CPUInfo'CacheSizeEntry
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CPUInfo'CacheSizeEntry'value
                              +                 (\ x__ y__ -> x__{_CPUInfo'CacheSizeEntry'value = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default CPUInfo'CacheSizeEntry where
                              +        def
                              +          = CPUInfo'CacheSizeEntry{_CPUInfo'CacheSizeEntry'key =
                              +                                     Data.ProtoLens.fieldDefault,
                              +                                   _CPUInfo'CacheSizeEntry'value = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message CPUInfo'CacheSizeEntry where
                              +        descriptor
                              +          = let key__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "key"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
                              +                      :: Data.ProtoLens.FieldDescriptor CPUInfo'CacheSizeEntry
                              +                value__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "value"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
                              +                      :: Data.ProtoLens.FieldDescriptor CPUInfo'CacheSizeEntry
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CPUInfo.CacheSizeEntry")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("key", key__field_descriptor),
                              +                    ("value", value__field_descriptor)])
                              +
                              +data CommitId = CommitId{_CommitId'snapshot :: !Data.Text.Text,
                              +                         _CommitId'kind :: !(Prelude.Maybe CommitId'Kind)}
                              +              deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data CommitId'Kind = CommitId'Changelist !Data.Int.Int64
                              +                   | CommitId'Hash !Data.Text.Text
                              +                   deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "snapshot" f CommitId CommitId a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CommitId'snapshot
                              +                 (\ x__ y__ -> x__{_CommitId'snapshot = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe CommitId'Kind,
                              +          b ~ Prelude.Maybe CommitId'Kind, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'kind" f CommitId CommitId a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CommitId'kind
                              +                 (\ x__ y__ -> x__{_CommitId'kind = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Data.Int.Int64,
                              +          b ~ Prelude.Maybe Data.Int.Int64, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'changelist" f CommitId CommitId a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CommitId'kind
                              +                 (\ x__ y__ -> x__{_CommitId'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (CommitId'Changelist x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap CommitId'Changelist y__))
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "changelist" f CommitId CommitId a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CommitId'kind
                              +                 (\ x__ y__ -> x__{_CommitId'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (CommitId'Changelist x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap CommitId'Changelist y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe Data.Text.Text,
                              +          b ~ Prelude.Maybe Data.Text.Text, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'hash" f CommitId CommitId a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CommitId'kind
                              +                 (\ x__ y__ -> x__{_CommitId'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (CommitId'Hash x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap CommitId'Hash y__))
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hash" f CommitId CommitId a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _CommitId'kind
                              +                 (\ x__ y__ -> x__{_CommitId'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (CommitId'Hash x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap CommitId'Hash y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance Data.Default.Class.Default CommitId where
                              +        def
                              +          = CommitId{_CommitId'snapshot = Data.ProtoLens.fieldDefault,
                              +                     _CommitId'kind = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message CommitId where
                              +        descriptor
                              +          = let snapshot__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "snapshot"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional snapshot)
                              +                      :: Data.ProtoLens.FieldDescriptor CommitId
                              +                changelist__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "changelist"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.OptionalField maybe'changelist)
                              +                      :: Data.ProtoLens.FieldDescriptor CommitId
                              +                hash__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "hash"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.OptionalField maybe'hash)
                              +                      :: Data.ProtoLens.FieldDescriptor CommitId
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.CommitId")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 3, snapshot__field_descriptor),
                              +                    (Data.ProtoLens.Tag 1, changelist__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, hash__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("snapshot", snapshot__field_descriptor),
                              +                    ("changelist", changelist__field_descriptor),
                              +                    ("hash", hash__field_descriptor)])
                              +
                              +data EntryValue = EntryValue{_EntryValue'kind ::
                              +                             !(Prelude.Maybe EntryValue'Kind)}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +data EntryValue'Kind = EntryValue'DoubleValue !Prelude.Double
                              +                     | EntryValue'StringValue !Data.Text.Text
                              +                     deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Prelude.Maybe EntryValue'Kind,
                              +          b ~ Prelude.Maybe EntryValue'Kind, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'kind" f EntryValue EntryValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _EntryValue'kind
                              +                 (\ x__ y__ -> x__{_EntryValue'kind = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Maybe Prelude.Double,
                              +          b ~ Prelude.Maybe Prelude.Double, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'doubleValue" f EntryValue EntryValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _EntryValue'kind
                              +                 (\ x__ y__ -> x__{_EntryValue'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (EntryValue'DoubleValue x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap EntryValue'DoubleValue y__))
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "doubleValue" f EntryValue EntryValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _EntryValue'kind
                              +                 (\ x__ y__ -> x__{_EntryValue'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (EntryValue'DoubleValue x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap EntryValue'DoubleValue y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance (a ~ Prelude.Maybe Data.Text.Text,
                              +          b ~ Prelude.Maybe Data.Text.Text, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'stringValue" f EntryValue EntryValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _EntryValue'kind
                              +                 (\ x__ y__ -> x__{_EntryValue'kind = y__}))
                              +              (Lens.Family2.Unchecked.lens
                              +                 (\ x__ ->
                              +                    case x__ of
                              +                        Prelude.Just (EntryValue'StringValue x__val) -> Prelude.Just x__val
                              +                        _otherwise -> Prelude.Nothing)
                              +                 (\ _ y__ -> Prelude.fmap EntryValue'StringValue y__))
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "stringValue" f EntryValue EntryValue a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _EntryValue'kind
                              +                 (\ x__ y__ -> x__{_EntryValue'kind = y__}))
                              +              ((Prelude..)
                              +                 (Lens.Family2.Unchecked.lens
                              +                    (\ x__ ->
                              +                       case x__ of
                              +                           Prelude.Just (EntryValue'StringValue x__val) -> Prelude.Just x__val
                              +                           _otherwise -> Prelude.Nothing)
                              +                    (\ _ y__ -> Prelude.fmap EntryValue'StringValue y__))
                              +                 (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault))
                              +
                              +instance Data.Default.Class.Default EntryValue where
                              +        def = EntryValue{_EntryValue'kind = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message EntryValue where
                              +        descriptor
                              +          = let doubleValue__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "double_value"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.OptionalField maybe'doubleValue)
                              +                      :: Data.ProtoLens.FieldDescriptor EntryValue
                              +                stringValue__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "string_value"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.OptionalField maybe'stringValue)
                              +                      :: Data.ProtoLens.FieldDescriptor EntryValue
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.EntryValue")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, doubleValue__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, stringValue__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("double_value", doubleValue__field_descriptor),
                              +                    ("string_value", stringValue__field_descriptor)])
                              +
                              +data GPUInfo = GPUInfo{_GPUInfo'model :: !Data.Text.Text,
                              +                       _GPUInfo'uuid :: !Data.Text.Text,
                              +                       _GPUInfo'busId :: !Data.Text.Text}
                              +             deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "model" f GPUInfo GPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUInfo'model
                              +                 (\ x__ y__ -> x__{_GPUInfo'model = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "uuid" f GPUInfo GPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUInfo'uuid
                              +                 (\ x__ y__ -> x__{_GPUInfo'uuid = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "busId" f GPUInfo GPUInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _GPUInfo'busId
                              +                 (\ x__ y__ -> x__{_GPUInfo'busId = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default GPUInfo where
                              +        def
                              +          = GPUInfo{_GPUInfo'model = Data.ProtoLens.fieldDefault,
                              +                    _GPUInfo'uuid = Data.ProtoLens.fieldDefault,
                              +                    _GPUInfo'busId = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message GPUInfo where
                              +        descriptor
                              +          = let model__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "model"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional model)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUInfo
                              +                uuid__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "uuid"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional uuid)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUInfo
                              +                busId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bus_id"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional busId)
                              +                      :: Data.ProtoLens.FieldDescriptor GPUInfo
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.GPUInfo")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, model__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, uuid__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, busId__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("model", model__field_descriptor),
                              +                    ("uuid", uuid__field_descriptor),
                              +                    ("bus_id", busId__field_descriptor)])
                              +
                              +data MachineConfiguration = MachineConfiguration{_MachineConfiguration'hostname
                              +                                                 :: !Data.Text.Text,
                              +                                                 _MachineConfiguration'serialIdentifier ::
                              +                                                 !Data.Text.Text,
                              +                                                 _MachineConfiguration'platformInfo ::
                              +                                                 !(Prelude.Maybe PlatformInfo),
                              +                                                 _MachineConfiguration'cpuInfo ::
                              +                                                 !(Prelude.Maybe CPUInfo),
                              +                                                 _MachineConfiguration'deviceInfo ::
                              +                                                 ![Proto.Google.Protobuf.Any.Any],
                              +                                                 _MachineConfiguration'availableDeviceInfo ::
                              +                                                 ![AvailableDeviceInfo],
                              +                                                 _MachineConfiguration'memoryInfo ::
                              +                                                 !(Prelude.Maybe MemoryInfo)}
                              +                          deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "hostname" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'hostname
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'hostname = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "serialIdentifier" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'serialIdentifier
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'serialIdentifier = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ PlatformInfo, b ~ PlatformInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "platformInfo" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'platformInfo
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'platformInfo = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe PlatformInfo,
                              +          b ~ Prelude.Maybe PlatformInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'platformInfo" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'platformInfo
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'platformInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ CPUInfo, b ~ CPUInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "cpuInfo" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'cpuInfo
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'cpuInfo = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe CPUInfo, b ~ Prelude.Maybe CPUInfo,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'cpuInfo" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'cpuInfo
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'cpuInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [Proto.Google.Protobuf.Any.Any],
                              +          b ~ [Proto.Google.Protobuf.Any.Any], Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "deviceInfo" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'deviceInfo
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'deviceInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ [AvailableDeviceInfo], b ~ [AvailableDeviceInfo],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "availableDeviceInfo" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens
                              +                 _MachineConfiguration'availableDeviceInfo
                              +                 (\ x__ y__ ->
                              +                    x__{_MachineConfiguration'availableDeviceInfo = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ MemoryInfo, b ~ MemoryInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "memoryInfo" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'memoryInfo
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'memoryInfo = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe MemoryInfo,
                              +          b ~ Prelude.Maybe MemoryInfo, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'memoryInfo" f MachineConfiguration
                              +           MachineConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MachineConfiguration'memoryInfo
                              +                 (\ x__ y__ -> x__{_MachineConfiguration'memoryInfo = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MachineConfiguration where
                              +        def
                              +          = MachineConfiguration{_MachineConfiguration'hostname =
                              +                                   Data.ProtoLens.fieldDefault,
                              +                                 _MachineConfiguration'serialIdentifier =
                              +                                   Data.ProtoLens.fieldDefault,
                              +                                 _MachineConfiguration'platformInfo = Prelude.Nothing,
                              +                                 _MachineConfiguration'cpuInfo = Prelude.Nothing,
                              +                                 _MachineConfiguration'deviceInfo = [],
                              +                                 _MachineConfiguration'availableDeviceInfo = [],
                              +                                 _MachineConfiguration'memoryInfo = Prelude.Nothing}
                              +
                              +instance Data.ProtoLens.Message MachineConfiguration where
                              +        descriptor
                              +          = let hostname__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "hostname"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional hostname)
                              +                      :: Data.ProtoLens.FieldDescriptor MachineConfiguration
                              +                serialIdentifier__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "serial_identifier"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
                              +                         serialIdentifier)
                              +                      :: Data.ProtoLens.FieldDescriptor MachineConfiguration
                              +                platformInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "platform_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor PlatformInfo)
                              +                      (Data.ProtoLens.OptionalField maybe'platformInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor MachineConfiguration
                              +                cpuInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "cpu_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CPUInfo)
                              +                      (Data.ProtoLens.OptionalField maybe'cpuInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor MachineConfiguration
                              +                deviceInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "device_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Proto.Google.Protobuf.Any.Any)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked deviceInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor MachineConfiguration
                              +                availableDeviceInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "available_device_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor AvailableDeviceInfo)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
                              +                         availableDeviceInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor MachineConfiguration
                              +                memoryInfo__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "memory_info"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor MemoryInfo)
                              +                      (Data.ProtoLens.OptionalField maybe'memoryInfo)
                              +                      :: Data.ProtoLens.FieldDescriptor MachineConfiguration
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MachineConfiguration")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, hostname__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, serialIdentifier__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, platformInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, cpuInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, deviceInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, availableDeviceInfo__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, memoryInfo__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("hostname", hostname__field_descriptor),
                              +                    ("serial_identifier", serialIdentifier__field_descriptor),
                              +                    ("platform_info", platformInfo__field_descriptor),
                              +                    ("cpu_info", cpuInfo__field_descriptor),
                              +                    ("device_info", deviceInfo__field_descriptor),
                              +                    ("available_device_info", availableDeviceInfo__field_descriptor),
                              +                    ("memory_info", memoryInfo__field_descriptor)])
                              +
                              +data MemoryInfo = MemoryInfo{_MemoryInfo'total :: !Data.Int.Int64,
                              +                             _MemoryInfo'available :: !Data.Int.Int64}
                              +                deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "total" f MemoryInfo MemoryInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryInfo'total
                              +                 (\ x__ y__ -> x__{_MemoryInfo'total = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "available" f MemoryInfo MemoryInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _MemoryInfo'available
                              +                 (\ x__ y__ -> x__{_MemoryInfo'available = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default MemoryInfo where
                              +        def
                              +          = MemoryInfo{_MemoryInfo'total = Data.ProtoLens.fieldDefault,
                              +                       _MemoryInfo'available = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message MemoryInfo where
                              +        descriptor
                              +          = let total__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "total"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional total)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryInfo
                              +                available__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "available"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional available)
                              +                      :: Data.ProtoLens.FieldDescriptor MemoryInfo
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.MemoryInfo")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, total__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, available__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("total", total__field_descriptor),
                              +                    ("available", available__field_descriptor)])
                              +
                              +data PlatformInfo = PlatformInfo{_PlatformInfo'bits ::
                              +                                 !Data.Text.Text,
                              +                                 _PlatformInfo'linkage :: !Data.Text.Text,
                              +                                 _PlatformInfo'machine :: !Data.Text.Text,
                              +                                 _PlatformInfo'release :: !Data.Text.Text,
                              +                                 _PlatformInfo'system :: !Data.Text.Text,
                              +                                 _PlatformInfo'version :: !Data.Text.Text}
                              +                  deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "bits" f PlatformInfo PlatformInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _PlatformInfo'bits
                              +                 (\ x__ y__ -> x__{_PlatformInfo'bits = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "linkage" f PlatformInfo PlatformInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _PlatformInfo'linkage
                              +                 (\ x__ y__ -> x__{_PlatformInfo'linkage = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "machine" f PlatformInfo PlatformInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _PlatformInfo'machine
                              +                 (\ x__ y__ -> x__{_PlatformInfo'machine = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "release" f PlatformInfo PlatformInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _PlatformInfo'release
                              +                 (\ x__ y__ -> x__{_PlatformInfo'release = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "system" f PlatformInfo PlatformInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _PlatformInfo'system
                              +                 (\ x__ y__ -> x__{_PlatformInfo'system = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "version" f PlatformInfo PlatformInfo a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _PlatformInfo'version
                              +                 (\ x__ y__ -> x__{_PlatformInfo'version = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default PlatformInfo where
                              +        def
                              +          = PlatformInfo{_PlatformInfo'bits = Data.ProtoLens.fieldDefault,
                              +                         _PlatformInfo'linkage = Data.ProtoLens.fieldDefault,
                              +                         _PlatformInfo'machine = Data.ProtoLens.fieldDefault,
                              +                         _PlatformInfo'release = Data.ProtoLens.fieldDefault,
                              +                         _PlatformInfo'system = Data.ProtoLens.fieldDefault,
                              +                         _PlatformInfo'version = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message PlatformInfo where
                              +        descriptor
                              +          = let bits__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "bits"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional bits)
                              +                      :: Data.ProtoLens.FieldDescriptor PlatformInfo
                              +                linkage__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "linkage"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional linkage)
                              +                      :: Data.ProtoLens.FieldDescriptor PlatformInfo
                              +                machine__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "machine"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional machine)
                              +                      :: Data.ProtoLens.FieldDescriptor PlatformInfo
                              +                release__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "release"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional release)
                              +                      :: Data.ProtoLens.FieldDescriptor PlatformInfo
                              +                system__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "system"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional system)
                              +                      :: Data.ProtoLens.FieldDescriptor PlatformInfo
                              +                version__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "version"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional version)
                              +                      :: Data.ProtoLens.FieldDescriptor PlatformInfo
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.PlatformInfo")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, bits__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, linkage__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, machine__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, release__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, system__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, version__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("bits", bits__field_descriptor),
                              +                    ("linkage", linkage__field_descriptor),
                              +                    ("machine", machine__field_descriptor),
                              +                    ("release", release__field_descriptor),
                              +                    ("system", system__field_descriptor),
                              +                    ("version", version__field_descriptor)])
                              +
                              +data RunConfiguration = RunConfiguration{_RunConfiguration'argument
                              +                                         :: ![Data.Text.Text]}
                              +                      deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ [Data.Text.Text], b ~ [Data.Text.Text],
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "argument" f RunConfiguration RunConfiguration
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _RunConfiguration'argument
                              +                 (\ x__ y__ -> x__{_RunConfiguration'argument = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default RunConfiguration where
                              +        def = RunConfiguration{_RunConfiguration'argument = []}
                              +
                              +instance Data.ProtoLens.Message RunConfiguration where
                              +        descriptor
                              +          = let argument__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "argument"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked argument)
                              +                      :: Data.ProtoLens.FieldDescriptor RunConfiguration
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.RunConfiguration")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, argument__field_descriptor)])
                              +                (Data.Map.fromList [("argument", argument__field_descriptor)])
                              +
                              +data TestResults = TestResults{_TestResults'target ::
                              +                               !Data.Text.Text,
                              +                               _TestResults'entries :: !(Prelude.Maybe BenchmarkEntries),
                              +                               _TestResults'buildConfiguration ::
                              +                               !(Prelude.Maybe BuildConfiguration),
                              +                               _TestResults'commitId :: !(Prelude.Maybe CommitId),
                              +                               _TestResults'startTime :: !Data.Int.Int64,
                              +                               _TestResults'runTime :: !Prelude.Double,
                              +                               _TestResults'machineConfiguration ::
                              +                               !(Prelude.Maybe MachineConfiguration),
                              +                               _TestResults'runConfiguration :: !(Prelude.Maybe RunConfiguration),
                              +                               _TestResults'name :: !Data.Text.Text,
                              +                               _TestResults'benchmarkType :: !TestResults'BenchmarkType,
                              +                               _TestResults'runMode :: !Data.Text.Text}
                              +                 deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "target" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'target
                              +                 (\ x__ y__ -> x__{_TestResults'target = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ BenchmarkEntries, b ~ BenchmarkEntries,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "entries" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'entries
                              +                 (\ x__ y__ -> x__{_TestResults'entries = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe BenchmarkEntries,
                              +          b ~ Prelude.Maybe BenchmarkEntries, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'entries" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'entries
                              +                 (\ x__ y__ -> x__{_TestResults'entries = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ BuildConfiguration, b ~ BuildConfiguration,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "buildConfiguration" f TestResults TestResults
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'buildConfiguration
                              +                 (\ x__ y__ -> x__{_TestResults'buildConfiguration = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe BuildConfiguration,
                              +          b ~ Prelude.Maybe BuildConfiguration, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'buildConfiguration" f TestResults
                              +           TestResults
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'buildConfiguration
                              +                 (\ x__ y__ -> x__{_TestResults'buildConfiguration = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ CommitId, b ~ CommitId, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "commitId" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'commitId
                              +                 (\ x__ y__ -> x__{_TestResults'commitId = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe CommitId, b ~ Prelude.Maybe CommitId,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'commitId" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'commitId
                              +                 (\ x__ y__ -> x__{_TestResults'commitId = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Int.Int64, b ~ Data.Int.Int64,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "startTime" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'startTime
                              +                 (\ x__ y__ -> x__{_TestResults'startTime = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Prelude.Double, b ~ Prelude.Double,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "runTime" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'runTime
                              +                 (\ x__ y__ -> x__{_TestResults'runTime = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ MachineConfiguration, b ~ MachineConfiguration,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "machineConfiguration" f TestResults
                              +           TestResults
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'machineConfiguration
                              +                 (\ x__ y__ -> x__{_TestResults'machineConfiguration = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe MachineConfiguration,
                              +          b ~ Prelude.Maybe MachineConfiguration, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'machineConfiguration" f TestResults
                              +           TestResults
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'machineConfiguration
                              +                 (\ x__ y__ -> x__{_TestResults'machineConfiguration = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ RunConfiguration, b ~ RunConfiguration,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "runConfiguration" f TestResults TestResults a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'runConfiguration
                              +                 (\ x__ y__ -> x__{_TestResults'runConfiguration = y__}))
                              +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
                              +
                              +instance (a ~ Prelude.Maybe RunConfiguration,
                              +          b ~ Prelude.Maybe RunConfiguration, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "maybe'runConfiguration" f TestResults
                              +           TestResults
                              +           a
                              +           b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'runConfiguration
                              +                 (\ x__ y__ -> x__{_TestResults'runConfiguration = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "name" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'name
                              +                 (\ x__ y__ -> x__{_TestResults'name = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ TestResults'BenchmarkType,
                              +          b ~ TestResults'BenchmarkType, Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "benchmarkType" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'benchmarkType
                              +                 (\ x__ y__ -> x__{_TestResults'benchmarkType = y__}))
                              +              Prelude.id
                              +
                              +instance (a ~ Data.Text.Text, b ~ Data.Text.Text,
                              +          Prelude.Functor f) =>
                              +         Lens.Labels.HasLens "runMode" f TestResults TestResults a b
                              +         where
                              +        lensOf _
                              +          = (Prelude..)
                              +              (Lens.Family2.Unchecked.lens _TestResults'runMode
                              +                 (\ x__ y__ -> x__{_TestResults'runMode = y__}))
                              +              Prelude.id
                              +
                              +instance Data.Default.Class.Default TestResults where
                              +        def
                              +          = TestResults{_TestResults'target = Data.ProtoLens.fieldDefault,
                              +                        _TestResults'entries = Prelude.Nothing,
                              +                        _TestResults'buildConfiguration = Prelude.Nothing,
                              +                        _TestResults'commitId = Prelude.Nothing,
                              +                        _TestResults'startTime = Data.ProtoLens.fieldDefault,
                              +                        _TestResults'runTime = Data.ProtoLens.fieldDefault,
                              +                        _TestResults'machineConfiguration = Prelude.Nothing,
                              +                        _TestResults'runConfiguration = Prelude.Nothing,
                              +                        _TestResults'name = Data.ProtoLens.fieldDefault,
                              +                        _TestResults'benchmarkType = Data.Default.Class.def,
                              +                        _TestResults'runMode = Data.ProtoLens.fieldDefault}
                              +
                              +instance Data.ProtoLens.Message TestResults where
                              +        descriptor
                              +          = let target__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "target"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional target)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                entries__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "entries"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor BenchmarkEntries)
                              +                      (Data.ProtoLens.OptionalField maybe'entries)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                buildConfiguration__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "build_configuration"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor BuildConfiguration)
                              +                      (Data.ProtoLens.OptionalField maybe'buildConfiguration)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                commitId__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "commit_id"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor CommitId)
                              +                      (Data.ProtoLens.OptionalField maybe'commitId)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                startTime__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "start_time"
                              +                      (Data.ProtoLens.Int64Field ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional startTime)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                runTime__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "run_time"
                              +                      (Data.ProtoLens.DoubleField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional runTime)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                machineConfiguration__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "machine_configuration"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor MachineConfiguration)
                              +                      (Data.ProtoLens.OptionalField maybe'machineConfiguration)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                runConfiguration__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "run_configuration"
                              +                      (Data.ProtoLens.MessageField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor RunConfiguration)
                              +                      (Data.ProtoLens.OptionalField maybe'runConfiguration)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                name__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "name"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                benchmarkType__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "benchmark_type"
                              +                      (Data.ProtoLens.EnumField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor TestResults'BenchmarkType)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional benchmarkType)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +                runMode__field_descriptor
                              +                  = Data.ProtoLens.FieldDescriptor "run_mode"
                              +                      (Data.ProtoLens.StringField ::
                              +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
                              +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional runMode)
                              +                      :: Data.ProtoLens.FieldDescriptor TestResults
                              +              in
                              +              Data.ProtoLens.MessageDescriptor
                              +                (Data.Text.pack "tensorflow.TestResults")
                              +                (Data.Map.fromList
                              +                   [(Data.ProtoLens.Tag 1, target__field_descriptor),
                              +                    (Data.ProtoLens.Tag 2, entries__field_descriptor),
                              +                    (Data.ProtoLens.Tag 3, buildConfiguration__field_descriptor),
                              +                    (Data.ProtoLens.Tag 4, commitId__field_descriptor),
                              +                    (Data.ProtoLens.Tag 5, startTime__field_descriptor),
                              +                    (Data.ProtoLens.Tag 6, runTime__field_descriptor),
                              +                    (Data.ProtoLens.Tag 7, machineConfiguration__field_descriptor),
                              +                    (Data.ProtoLens.Tag 8, runConfiguration__field_descriptor),
                              +                    (Data.ProtoLens.Tag 9, name__field_descriptor),
                              +                    (Data.ProtoLens.Tag 10, benchmarkType__field_descriptor),
                              +                    (Data.ProtoLens.Tag 11, runMode__field_descriptor)])
                              +                (Data.Map.fromList
                              +                   [("target", target__field_descriptor),
                              +                    ("entries", entries__field_descriptor),
                              +                    ("build_configuration", buildConfiguration__field_descriptor),
                              +                    ("commit_id", commitId__field_descriptor),
                              +                    ("start_time", startTime__field_descriptor),
                              +                    ("run_time", runTime__field_descriptor),
                              +                    ("machine_configuration", machineConfiguration__field_descriptor),
                              +                    ("run_configuration", runConfiguration__field_descriptor),
                              +                    ("name", name__field_descriptor),
                              +                    ("benchmark_type", benchmarkType__field_descriptor),
                              +                    ("run_mode", runMode__field_descriptor)])
                              +
                              +data TestResults'BenchmarkType = TestResults'UNKNOWN
                              +                               | TestResults'CPP_MICROBENCHMARK
                              +                               | TestResults'PYTHON_BENCHMARK
                              +                               | TestResults'ANDROID_BENCHMARK
                              +                               deriving (Prelude.Show, Prelude.Eq, Prelude.Ord)
                              +
                              +instance Data.Default.Class.Default TestResults'BenchmarkType where
                              +        def = TestResults'UNKNOWN
                              +
                              +instance Data.ProtoLens.FieldDefault TestResults'BenchmarkType
                              +         where
                              +        fieldDefault = TestResults'UNKNOWN
                              +
                              +instance Data.ProtoLens.MessageEnum TestResults'BenchmarkType where
                              +        maybeToEnum 0 = Prelude.Just TestResults'UNKNOWN
                              +        maybeToEnum 1 = Prelude.Just TestResults'CPP_MICROBENCHMARK
                              +        maybeToEnum 2 = Prelude.Just TestResults'PYTHON_BENCHMARK
                              +        maybeToEnum 3 = Prelude.Just TestResults'ANDROID_BENCHMARK
                              +        maybeToEnum _ = Prelude.Nothing
                              +        showEnum TestResults'UNKNOWN = "UNKNOWN"
                              +        showEnum TestResults'CPP_MICROBENCHMARK = "CPP_MICROBENCHMARK"
                              +        showEnum TestResults'PYTHON_BENCHMARK = "PYTHON_BENCHMARK"
                              +        showEnum TestResults'ANDROID_BENCHMARK = "ANDROID_BENCHMARK"
                              +        readEnum "UNKNOWN" = Prelude.Just TestResults'UNKNOWN
                              +        readEnum "CPP_MICROBENCHMARK"
                              +          = Prelude.Just TestResults'CPP_MICROBENCHMARK
                              +        readEnum "PYTHON_BENCHMARK"
                              +          = Prelude.Just TestResults'PYTHON_BENCHMARK
                              +        readEnum "ANDROID_BENCHMARK"
                              +          = Prelude.Just TestResults'ANDROID_BENCHMARK
                              +        readEnum _ = Prelude.Nothing
                              +
                              +instance Prelude.Enum TestResults'BenchmarkType where
                              +        toEnum k__
                              +          = Prelude.maybe
                              +              (Prelude.error
                              +                 ((Prelude.++) "toEnum: unknown value for enum BenchmarkType: "
                              +                    (Prelude.show k__)))
                              +              Prelude.id
                              +              (Data.ProtoLens.maybeToEnum k__)
                              +        fromEnum TestResults'UNKNOWN = 0
                              +        fromEnum TestResults'CPP_MICROBENCHMARK = 1
                              +        fromEnum TestResults'PYTHON_BENCHMARK = 2
                              +        fromEnum TestResults'ANDROID_BENCHMARK = 3
                              +        succ TestResults'ANDROID_BENCHMARK
                              +          = Prelude.error
                              +              "TestResults'BenchmarkType.succ: bad argument TestResults'ANDROID_BENCHMARK. This value would be out of bounds."
                              +        succ TestResults'UNKNOWN = TestResults'CPP_MICROBENCHMARK
                              +        succ TestResults'CPP_MICROBENCHMARK = TestResults'PYTHON_BENCHMARK
                              +        succ TestResults'PYTHON_BENCHMARK = TestResults'ANDROID_BENCHMARK
                              +        pred TestResults'UNKNOWN
                              +          = Prelude.error
                              +              "TestResults'BenchmarkType.pred: bad argument TestResults'UNKNOWN. This value would be out of bounds."
                              +        pred TestResults'CPP_MICROBENCHMARK = TestResults'UNKNOWN
                              +        pred TestResults'PYTHON_BENCHMARK = TestResults'CPP_MICROBENCHMARK
                              +        pred TestResults'ANDROID_BENCHMARK = TestResults'PYTHON_BENCHMARK
                              +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
                              +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
                              +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
                              +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
                              +
                              +instance Prelude.Bounded TestResults'BenchmarkType where
                              +        minBound = TestResults'UNKNOWN
                              +        maxBound = TestResults'ANDROID_BENCHMARK
                              +
                              +argument ::
                              +         forall f s t a b . (Lens.Labels.HasLens "argument" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +argument
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "argument")
                              +
                              +available ::
                              +          forall f s t a b . (Lens.Labels.HasLens "available" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +available
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "available")
                              +
                              +availableDeviceInfo ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "availableDeviceInfo" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +availableDeviceInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "availableDeviceInfo")
                              +
                              +benchmarkType ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "benchmarkType" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +benchmarkType
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "benchmarkType")
                              +
                              +bits ::
                              +     forall f s t a b . (Lens.Labels.HasLens "bits" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +bits
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "bits")
                              +
                              +buildConfiguration ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "buildConfiguration" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +buildConfiguration
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "buildConfiguration")
                              +
                              +busId ::
                              +      forall f s t a b . (Lens.Labels.HasLens "busId" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +busId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "busId")
                              +
                              +cacheSize ::
                              +          forall f s t a b . (Lens.Labels.HasLens "cacheSize" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +cacheSize
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "cacheSize")
                              +
                              +ccFlags ::
                              +        forall f s t a b . (Lens.Labels.HasLens "ccFlags" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +ccFlags
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "ccFlags")
                              +
                              +changelist ::
                              +           forall f s t a b . (Lens.Labels.HasLens "changelist" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +changelist
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "changelist")
                              +
                              +commitId ::
                              +         forall f s t a b . (Lens.Labels.HasLens "commitId" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +commitId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "commitId")
                              +
                              +cpuGovernor ::
                              +            forall f s t a b . (Lens.Labels.HasLens "cpuGovernor" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +cpuGovernor
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "cpuGovernor")
                              +
                              +cpuInfo ::
                              +        forall f s t a b . (Lens.Labels.HasLens "cpuInfo" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +cpuInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "cpuInfo")
                              +
                              +cpuTime ::
                              +        forall f s t a b . (Lens.Labels.HasLens "cpuTime" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +cpuTime
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "cpuTime")
                              +
                              +deviceInfo ::
                              +           forall f s t a b . (Lens.Labels.HasLens "deviceInfo" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +deviceInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "deviceInfo")
                              +
                              +doubleValue ::
                              +            forall f s t a b . (Lens.Labels.HasLens "doubleValue" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +doubleValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "doubleValue")
                              +
                              +entries ::
                              +        forall f s t a b . (Lens.Labels.HasLens "entries" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +entries
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "entries")
                              +
                              +entry ::
                              +      forall f s t a b . (Lens.Labels.HasLens "entry" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +entry
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "entry")
                              +
                              +extras ::
                              +       forall f s t a b . (Lens.Labels.HasLens "extras" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +extras
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "extras")
                              +
                              +hash ::
                              +     forall f s t a b . (Lens.Labels.HasLens "hash" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +hash
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "hash")
                              +
                              +hostname ::
                              +         forall f s t a b . (Lens.Labels.HasLens "hostname" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +hostname
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "hostname")
                              +
                              +iters ::
                              +      forall f s t a b . (Lens.Labels.HasLens "iters" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +iters
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "iters")
                              +
                              +key ::
                              +    forall f s t a b . (Lens.Labels.HasLens "key" f s t a b) =>
                              +      Lens.Family2.LensLike f s t a b
                              +key
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "key")
                              +
                              +linkage ::
                              +        forall f s t a b . (Lens.Labels.HasLens "linkage" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +linkage
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "linkage")
                              +
                              +machine ::
                              +        forall f s t a b . (Lens.Labels.HasLens "machine" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +machine
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "machine")
                              +
                              +machineConfiguration ::
                              +                     forall f s t a b .
                              +                       (Lens.Labels.HasLens "machineConfiguration" f s t a b) =>
                              +                       Lens.Family2.LensLike f s t a b
                              +machineConfiguration
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "machineConfiguration")
                              +
                              +maybe'buildConfiguration ::
                              +                         forall f s t a b .
                              +                           (Lens.Labels.HasLens "maybe'buildConfiguration" f s t a b) =>
                              +                           Lens.Family2.LensLike f s t a b
                              +maybe'buildConfiguration
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'buildConfiguration")
                              +
                              +maybe'changelist ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'changelist" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'changelist
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'changelist")
                              +
                              +maybe'commitId ::
                              +               forall f s t a b .
                              +                 (Lens.Labels.HasLens "maybe'commitId" f s t a b) =>
                              +                 Lens.Family2.LensLike f s t a b
                              +maybe'commitId
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'commitId")
                              +
                              +maybe'cpuInfo ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'cpuInfo" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'cpuInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'cpuInfo")
                              +
                              +maybe'doubleValue ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'doubleValue" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'doubleValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'doubleValue")
                              +
                              +maybe'entries ::
                              +              forall f s t a b .
                              +                (Lens.Labels.HasLens "maybe'entries" f s t a b) =>
                              +                Lens.Family2.LensLike f s t a b
                              +maybe'entries
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'entries")
                              +
                              +maybe'hash ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'hash" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'hash
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'hash")
                              +
                              +maybe'kind ::
                              +           forall f s t a b . (Lens.Labels.HasLens "maybe'kind" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +maybe'kind
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'kind")
                              +
                              +maybe'machineConfiguration ::
                              +                           forall f s t a b .
                              +                             (Lens.Labels.HasLens "maybe'machineConfiguration" f s t a b) =>
                              +                             Lens.Family2.LensLike f s t a b
                              +maybe'machineConfiguration
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'machineConfiguration")
                              +
                              +maybe'memoryInfo ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "maybe'memoryInfo" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +maybe'memoryInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'memoryInfo")
                              +
                              +maybe'platformInfo ::
                              +                   forall f s t a b .
                              +                     (Lens.Labels.HasLens "maybe'platformInfo" f s t a b) =>
                              +                     Lens.Family2.LensLike f s t a b
                              +maybe'platformInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'platformInfo")
                              +
                              +maybe'runConfiguration ::
                              +                       forall f s t a b .
                              +                         (Lens.Labels.HasLens "maybe'runConfiguration" f s t a b) =>
                              +                         Lens.Family2.LensLike f s t a b
                              +maybe'runConfiguration
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "maybe'runConfiguration")
                              +
                              +maybe'stringValue ::
                              +                  forall f s t a b .
                              +                    (Lens.Labels.HasLens "maybe'stringValue" f s t a b) =>
                              +                    Lens.Family2.LensLike f s t a b
                              +maybe'stringValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'stringValue")
                              +
                              +maybe'value ::
                              +            forall f s t a b . (Lens.Labels.HasLens "maybe'value" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +maybe'value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "maybe'value")
                              +
                              +memoryInfo ::
                              +           forall f s t a b . (Lens.Labels.HasLens "memoryInfo" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +memoryInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "memoryInfo")
                              +
                              +memoryLimit ::
                              +            forall f s t a b . (Lens.Labels.HasLens "memoryLimit" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +memoryLimit
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "memoryLimit")
                              +
                              +mhzPerCpu ::
                              +          forall f s t a b . (Lens.Labels.HasLens "mhzPerCpu" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +mhzPerCpu
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "mhzPerCpu")
                              +
                              +mode ::
                              +     forall f s t a b . (Lens.Labels.HasLens "mode" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +mode
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "mode")
                              +
                              +model ::
                              +      forall f s t a b . (Lens.Labels.HasLens "model" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +model
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "model")
                              +
                              +name ::
                              +     forall f s t a b . (Lens.Labels.HasLens "name" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +name
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "name")
                              +
                              +numCores ::
                              +         forall f s t a b . (Lens.Labels.HasLens "numCores" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +numCores
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "numCores")
                              +
                              +numCoresAllowed ::
                              +                forall f s t a b .
                              +                  (Lens.Labels.HasLens "numCoresAllowed" f s t a b) =>
                              +                  Lens.Family2.LensLike f s t a b
                              +numCoresAllowed
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "numCoresAllowed")
                              +
                              +opts ::
                              +     forall f s t a b . (Lens.Labels.HasLens "opts" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +opts
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "opts")
                              +
                              +physicalDescription ::
                              +                    forall f s t a b .
                              +                      (Lens.Labels.HasLens "physicalDescription" f s t a b) =>
                              +                      Lens.Family2.LensLike f s t a b
                              +physicalDescription
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) ::
                              +         (Lens.Labels.Proxy#) "physicalDescription")
                              +
                              +platformInfo ::
                              +             forall f s t a b .
                              +               (Lens.Labels.HasLens "platformInfo" f s t a b) =>
                              +               Lens.Family2.LensLike f s t a b
                              +platformInfo
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "platformInfo")
                              +
                              +release ::
                              +        forall f s t a b . (Lens.Labels.HasLens "release" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +release
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "release")
                              +
                              +runConfiguration ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "runConfiguration" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +runConfiguration
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "runConfiguration")
                              +
                              +runMode ::
                              +        forall f s t a b . (Lens.Labels.HasLens "runMode" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +runMode
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "runMode")
                              +
                              +runTime ::
                              +        forall f s t a b . (Lens.Labels.HasLens "runTime" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +runTime
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "runTime")
                              +
                              +serialIdentifier ::
                              +                 forall f s t a b .
                              +                   (Lens.Labels.HasLens "serialIdentifier" f s t a b) =>
                              +                   Lens.Family2.LensLike f s t a b
                              +serialIdentifier
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "serialIdentifier")
                              +
                              +snapshot ::
                              +         forall f s t a b . (Lens.Labels.HasLens "snapshot" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +snapshot
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "snapshot")
                              +
                              +startTime ::
                              +          forall f s t a b . (Lens.Labels.HasLens "startTime" f s t a b) =>
                              +            Lens.Family2.LensLike f s t a b
                              +startTime
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "startTime")
                              +
                              +stringValue ::
                              +            forall f s t a b . (Lens.Labels.HasLens "stringValue" f s t a b) =>
                              +              Lens.Family2.LensLike f s t a b
                              +stringValue
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "stringValue")
                              +
                              +system ::
                              +       forall f s t a b . (Lens.Labels.HasLens "system" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +system
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "system")
                              +
                              +target ::
                              +       forall f s t a b . (Lens.Labels.HasLens "target" f s t a b) =>
                              +         Lens.Family2.LensLike f s t a b
                              +target
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "target")
                              +
                              +throughput ::
                              +           forall f s t a b . (Lens.Labels.HasLens "throughput" f s t a b) =>
                              +             Lens.Family2.LensLike f s t a b
                              +throughput
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "throughput")
                              +
                              +total ::
                              +      forall f s t a b . (Lens.Labels.HasLens "total" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +total
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "total")
                              +
                              +type' ::
                              +      forall f s t a b . (Lens.Labels.HasLens "type'" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +type'
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "type'")
                              +
                              +uuid ::
                              +     forall f s t a b . (Lens.Labels.HasLens "uuid" f s t a b) =>
                              +       Lens.Family2.LensLike f s t a b
                              +uuid
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "uuid")
                              +
                              +value ::
                              +      forall f s t a b . (Lens.Labels.HasLens "value" f s t a b) =>
                              +        Lens.Family2.LensLike f s t a b
                              +value
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "value")
                              +
                              +version ::
                              +        forall f s t a b . (Lens.Labels.HasLens "version" f s t a b) =>
                              +          Lens.Family2.LensLike f s t a b
                              +version
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "version")
                              +
                              +wallTime ::
                              +         forall f s t a b . (Lens.Labels.HasLens "wallTime" f s t a b) =>
                              +           Lens.Family2.LensLike f s t a b
                              +wallTime
                              +  = Lens.Labels.lensOf
                              +      ((Lens.Labels.proxy#) :: (Lens.Labels.Proxy#) "wallTime")
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-proto-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/style.css b/docs/haddock/tensorflow-proto-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt b/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt deleted file mode 100644 index b46d1f8..0000000 --- 
a/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt +++ /dev/null @@ -1,920 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | TensorFlow protocol buffers. --- --- Please see README.md -@package tensorflow-proto -@version 0.1.0.0 - -module Proto.Tensorflow.Core.Framework.ResourceHandle -data ResourceHandle -ResourceHandle :: !Text -> !Text -> !Text -> !Word64 -> !Text -> ResourceHandle -[_ResourceHandle'device] :: ResourceHandle -> !Text -[_ResourceHandle'container] :: ResourceHandle -> !Text -[_ResourceHandle'name] :: ResourceHandle -> !Text -[_ResourceHandle'hashCode] :: ResourceHandle -> !Word64 -[_ResourceHandle'maybeTypeName] :: ResourceHandle -> !Text -container :: HasField "container" msg msg' => Lens msg msg' (Field "container" msg) (Field "container" msg') -device :: HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg') -hashCode :: HasField "hashCode" msg msg' => Lens msg msg' (Field "hashCode" msg) (Field "hashCode" msg') -maybeTypeName :: HasField "maybeTypeName" msg msg' => Lens msg msg' (Field "maybeTypeName" msg) (Field "maybeTypeName" msg') -name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle -instance Data.ProtoLens.Field.HasField "device" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle -instance Data.ProtoLens.Field.HasField "container" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle -instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle -instance Data.ProtoLens.Field.HasField 
"hashCode" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle -instance Data.ProtoLens.Field.HasField "maybeTypeName" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle - -module Proto.Tensorflow.Core.Framework.Types -data DataType -DT_INVALID :: DataType -DT_FLOAT :: DataType -DT_DOUBLE :: DataType -DT_INT32 :: DataType -DT_UINT8 :: DataType -DT_INT16 :: DataType -DT_INT8 :: DataType -DT_STRING :: DataType -DT_COMPLEX64 :: DataType -DT_INT64 :: DataType -DT_BOOL :: DataType -DT_QINT8 :: DataType -DT_QUINT8 :: DataType -DT_QINT32 :: DataType -DT_BFLOAT16 :: DataType -DT_QINT16 :: DataType -DT_QUINT16 :: DataType -DT_UINT16 :: DataType -DT_COMPLEX128 :: DataType -DT_HALF :: DataType -DT_RESOURCE :: DataType -DT_FLOAT_REF :: DataType -DT_DOUBLE_REF :: DataType -DT_INT32_REF :: DataType -DT_UINT8_REF :: DataType -DT_INT16_REF :: DataType -DT_INT8_REF :: DataType -DT_STRING_REF :: DataType -DT_COMPLEX64_REF :: DataType -DT_INT64_REF :: DataType -DT_BOOL_REF :: DataType -DT_QINT8_REF :: DataType -DT_QUINT8_REF :: DataType -DT_QINT32_REF :: DataType -DT_BFLOAT16_REF :: DataType -DT_QINT16_REF :: DataType -DT_QUINT16_REF :: DataType -DT_UINT16_REF :: DataType -DT_COMPLEX128_REF :: DataType -DT_HALF_REF :: DataType -DT_RESOURCE_REF :: DataType -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Types.DataType -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Types.DataType -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Types.DataType -instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Framework.Types.DataType -instance Data.ProtoLens.Message.MessageEnum 
Proto.Tensorflow.Core.Framework.Types.DataType -instance GHC.Enum.Enum Proto.Tensorflow.Core.Framework.Types.DataType -instance GHC.Enum.Bounded Proto.Tensorflow.Core.Framework.Types.DataType - -module Proto.Tensorflow.Core.Framework.TensorShape -data TensorShapeProto -TensorShapeProto :: ![TensorShapeProto'Dim] -> !Bool -> TensorShapeProto -[_TensorShapeProto'dim] :: TensorShapeProto -> ![TensorShapeProto'Dim] -[_TensorShapeProto'unknownRank] :: TensorShapeProto -> !Bool -data TensorShapeProto'Dim -TensorShapeProto'Dim :: !Int64 -> !Text -> TensorShapeProto'Dim -[_TensorShapeProto'Dim'size] :: TensorShapeProto'Dim -> !Int64 -[_TensorShapeProto'Dim'name] :: TensorShapeProto'Dim -> !Text -dim :: HasField "dim" msg msg' => Lens msg msg' (Field "dim" msg) (Field "dim" msg') -name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') -size :: HasField "size" msg msg' => Lens msg msg' (Field "size" msg) (Field "size" msg') -unknownRank :: HasField "unknownRank" msg msg' => Lens msg msg' (Field "unknownRank" msg) (Field "unknownRank" msg') -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim -instance Data.ProtoLens.Field.HasField "dim" Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto -instance Data.ProtoLens.Field.HasField "unknownRank" Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto -instance 
Data.ProtoLens.Field.HasField "size" Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim -instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim - -module Proto.Tensorflow.Core.Framework.Tensor -data TensorProto -TensorProto :: !DataType -> !(Maybe TensorShapeProto) -> !Int32 -> !ByteString -> ![Int32] -> ![Float] -> ![Double] -> ![Int32] -> ![ByteString] -> ![Float] -> ![Int64] -> ![Bool] -> ![Double] -> ![ResourceHandle] -> TensorProto -[_TensorProto'dtype] :: TensorProto -> !DataType -[_TensorProto'tensorShape] :: TensorProto -> !(Maybe TensorShapeProto) -[_TensorProto'versionNumber] :: TensorProto -> !Int32 -[_TensorProto'tensorContent] :: TensorProto -> !ByteString -[_TensorProto'halfVal] :: TensorProto -> ![Int32] -[_TensorProto'floatVal] :: TensorProto -> ![Float] -[_TensorProto'doubleVal] :: TensorProto -> ![Double] -[_TensorProto'intVal] :: TensorProto -> ![Int32] -[_TensorProto'stringVal] :: TensorProto -> ![ByteString] -[_TensorProto'scomplexVal] :: TensorProto -> ![Float] -[_TensorProto'int64Val] :: TensorProto -> ![Int64] -[_TensorProto'boolVal] :: TensorProto -> ![Bool] -[_TensorProto'dcomplexVal] :: TensorProto -> ![Double] -[_TensorProto'resourceHandleVal] :: TensorProto -> ![ResourceHandle] -boolVal :: HasField "boolVal" msg msg' => Lens msg msg' (Field "boolVal" msg) (Field "boolVal" msg') -dcomplexVal :: HasField "dcomplexVal" msg msg' => Lens msg msg' (Field "dcomplexVal" msg) (Field "dcomplexVal" msg') -doubleVal :: HasField "doubleVal" msg msg' => Lens msg msg' (Field "doubleVal" msg) (Field "doubleVal" msg') -dtype :: HasField "dtype" msg msg' 
=> Lens msg msg' (Field "dtype" msg) (Field "dtype" msg') -floatVal :: HasField "floatVal" msg msg' => Lens msg msg' (Field "floatVal" msg) (Field "floatVal" msg') -halfVal :: HasField "halfVal" msg msg' => Lens msg msg' (Field "halfVal" msg) (Field "halfVal" msg') -int64Val :: HasField "int64Val" msg msg' => Lens msg msg' (Field "int64Val" msg) (Field "int64Val" msg') -intVal :: HasField "intVal" msg msg' => Lens msg msg' (Field "intVal" msg) (Field "intVal" msg') -maybe'tensorShape :: HasField "maybe'tensorShape" msg msg' => Lens msg msg' (Field "maybe'tensorShape" msg) (Field "maybe'tensorShape" msg') -resourceHandleVal :: HasField "resourceHandleVal" msg msg' => Lens msg msg' (Field "resourceHandleVal" msg) (Field "resourceHandleVal" msg') -scomplexVal :: HasField "scomplexVal" msg msg' => Lens msg msg' (Field "scomplexVal" msg) (Field "scomplexVal" msg') -stringVal :: HasField "stringVal" msg msg' => Lens msg msg' (Field "stringVal" msg) (Field "stringVal" msg') -tensorContent :: HasField "tensorContent" msg msg' => Lens msg msg' (Field "tensorContent" msg) (Field "tensorContent" msg') -tensorShape :: HasField "tensorShape" msg msg' => Lens msg msg' (Field "tensorShape" msg) (Field "tensorShape" msg') -versionNumber :: HasField "versionNumber" msg msg' => Lens msg msg' (Field "versionNumber" msg) (Field "versionNumber" msg') -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "dtype" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "tensorShape" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "maybe'tensorShape" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance 
Data.ProtoLens.Field.HasField "versionNumber" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "tensorContent" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "halfVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "floatVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "doubleVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "intVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "stringVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "scomplexVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "int64Val" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "boolVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "dcomplexVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Field.HasField "resourceHandleVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Tensor.TensorProto -instance Data.ProtoLens.Message.Message 
Proto.Tensorflow.Core.Framework.Tensor.TensorProto - -module Proto.Tensorflow.Core.Framework.Summary -data HistogramProto -HistogramProto :: !Double -> !Double -> !Double -> !Double -> !Double -> ![Double] -> ![Double] -> HistogramProto -[_HistogramProto'min] :: HistogramProto -> !Double -[_HistogramProto'max] :: HistogramProto -> !Double -[_HistogramProto'num] :: HistogramProto -> !Double -[_HistogramProto'sum] :: HistogramProto -> !Double -[_HistogramProto'sumSquares] :: HistogramProto -> !Double -[_HistogramProto'bucketLimit] :: HistogramProto -> ![Double] -[_HistogramProto'bucket] :: HistogramProto -> ![Double] -data Summary -Summary :: ![Summary'Value] -> Summary -[_Summary'value] :: Summary -> ![Summary'Value] -data Summary'Audio -Summary'Audio :: !Float -> !Int64 -> !Int64 -> !ByteString -> !Text -> Summary'Audio -[_Summary'Audio'sampleRate] :: Summary'Audio -> !Float -[_Summary'Audio'numChannels] :: Summary'Audio -> !Int64 -[_Summary'Audio'lengthFrames] :: Summary'Audio -> !Int64 -[_Summary'Audio'encodedAudioString] :: Summary'Audio -> !ByteString -[_Summary'Audio'contentType] :: Summary'Audio -> !Text -data Summary'Image -Summary'Image :: !Int32 -> !Int32 -> !Int32 -> !ByteString -> Summary'Image -[_Summary'Image'height] :: Summary'Image -> !Int32 -[_Summary'Image'width] :: Summary'Image -> !Int32 -[_Summary'Image'colorspace] :: Summary'Image -> !Int32 -[_Summary'Image'encodedImageString] :: Summary'Image -> !ByteString -data Summary'Value -Summary'Value :: !Text -> !Text -> !(Maybe Float) -> !(Maybe ByteString) -> !(Maybe Summary'Image) -> !(Maybe HistogramProto) -> !(Maybe Summary'Audio) -> !(Maybe TensorProto) -> Summary'Value -[_Summary'Value'nodeName] :: Summary'Value -> !Text -[_Summary'Value'tag] :: Summary'Value -> !Text -[_Summary'Value'simpleValue] :: Summary'Value -> !(Maybe Float) -[_Summary'Value'obsoleteOldStyleHistogram] :: Summary'Value -> !(Maybe ByteString) -[_Summary'Value'image] :: Summary'Value -> !(Maybe Summary'Image) 
-[_Summary'Value'histo] :: Summary'Value -> !(Maybe HistogramProto) -[_Summary'Value'audio] :: Summary'Value -> !(Maybe Summary'Audio) -[_Summary'Value'tensor] :: Summary'Value -> !(Maybe TensorProto) -data SummaryDescription -SummaryDescription :: !Text -> SummaryDescription -[_SummaryDescription'typeHint] :: SummaryDescription -> !Text -audio :: HasField "audio" msg msg' => Lens msg msg' (Field "audio" msg) (Field "audio" msg') -bucket :: HasField "bucket" msg msg' => Lens msg msg' (Field "bucket" msg) (Field "bucket" msg') -bucketLimit :: HasField "bucketLimit" msg msg' => Lens msg msg' (Field "bucketLimit" msg) (Field "bucketLimit" msg') -colorspace :: HasField "colorspace" msg msg' => Lens msg msg' (Field "colorspace" msg) (Field "colorspace" msg') -contentType :: HasField "contentType" msg msg' => Lens msg msg' (Field "contentType" msg) (Field "contentType" msg') -encodedAudioString :: HasField "encodedAudioString" msg msg' => Lens msg msg' (Field "encodedAudioString" msg) (Field "encodedAudioString" msg') -encodedImageString :: HasField "encodedImageString" msg msg' => Lens msg msg' (Field "encodedImageString" msg) (Field "encodedImageString" msg') -height :: HasField "height" msg msg' => Lens msg msg' (Field "height" msg) (Field "height" msg') -histo :: HasField "histo" msg msg' => Lens msg msg' (Field "histo" msg) (Field "histo" msg') -image :: HasField "image" msg msg' => Lens msg msg' (Field "image" msg) (Field "image" msg') -lengthFrames :: HasField "lengthFrames" msg msg' => Lens msg msg' (Field "lengthFrames" msg) (Field "lengthFrames" msg') -max :: HasField "max" msg msg' => Lens msg msg' (Field "max" msg) (Field "max" msg') -maybe'audio :: HasField "maybe'audio" msg msg' => Lens msg msg' (Field "maybe'audio" msg) (Field "maybe'audio" msg') -maybe'histo :: HasField "maybe'histo" msg msg' => Lens msg msg' (Field "maybe'histo" msg) (Field "maybe'histo" msg') -maybe'image :: HasField "maybe'image" msg msg' => Lens msg msg' (Field "maybe'image" msg) 
(Field "maybe'image" msg') -maybe'obsoleteOldStyleHistogram :: HasField "maybe'obsoleteOldStyleHistogram" msg msg' => Lens msg msg' (Field "maybe'obsoleteOldStyleHistogram" msg) (Field "maybe'obsoleteOldStyleHistogram" msg') -maybe'simpleValue :: HasField "maybe'simpleValue" msg msg' => Lens msg msg' (Field "maybe'simpleValue" msg) (Field "maybe'simpleValue" msg') -maybe'tensor :: HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg') -min :: HasField "min" msg msg' => Lens msg msg' (Field "min" msg) (Field "min" msg') -nodeName :: HasField "nodeName" msg msg' => Lens msg msg' (Field "nodeName" msg) (Field "nodeName" msg') -num :: HasField "num" msg msg' => Lens msg msg' (Field "num" msg) (Field "num" msg') -numChannels :: HasField "numChannels" msg msg' => Lens msg msg' (Field "numChannels" msg) (Field "numChannels" msg') -obsoleteOldStyleHistogram :: HasField "obsoleteOldStyleHistogram" msg msg' => Lens msg msg' (Field "obsoleteOldStyleHistogram" msg) (Field "obsoleteOldStyleHistogram" msg') -sampleRate :: HasField "sampleRate" msg msg' => Lens msg msg' (Field "sampleRate" msg) (Field "sampleRate" msg') -simpleValue :: HasField "simpleValue" msg msg' => Lens msg msg' (Field "simpleValue" msg) (Field "simpleValue" msg') -sum :: HasField "sum" msg msg' => Lens msg msg' (Field "sum" msg) (Field "sum" msg') -sumSquares :: HasField "sumSquares" msg msg' => Lens msg msg' (Field "sumSquares" msg) (Field "sumSquares" msg') -tag :: HasField "tag" msg msg' => Lens msg msg' (Field "tag" msg) (Field "tag" msg') -tensor :: HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg') -typeHint :: HasField "typeHint" msg msg' => Lens msg msg' (Field "typeHint" msg) (Field "typeHint" msg') -value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') -width :: HasField "width" msg msg' => Lens msg msg' (Field "width" msg) (Field "width" msg') -instance GHC.Classes.Eq 
Proto.Tensorflow.Core.Framework.Summary.SummaryDescription -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.SummaryDescription -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.Summary -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.Summary -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.Summary'Image -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.Summary'Image -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Field.HasField "min" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Field.HasField "max" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Field.HasField "num" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Field.HasField "sum" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Field.HasField "sumSquares" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Field.HasField "bucketLimit" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Field.HasField "bucket" Proto.Tensorflow.Core.Framework.Summary.HistogramProto 
Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.HistogramProto -instance Data.ProtoLens.Field.HasField "value" Proto.Tensorflow.Core.Framework.Summary.Summary Proto.Tensorflow.Core.Framework.Summary.Summary -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.Summary -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.Summary -instance Data.ProtoLens.Field.HasField "sampleRate" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance Data.ProtoLens.Field.HasField "numChannels" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance Data.ProtoLens.Field.HasField "lengthFrames" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance Data.ProtoLens.Field.HasField "encodedAudioString" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance Data.ProtoLens.Field.HasField "contentType" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.Summary'Audio -instance Data.ProtoLens.Field.HasField "height" Proto.Tensorflow.Core.Framework.Summary.Summary'Image Proto.Tensorflow.Core.Framework.Summary.Summary'Image -instance Data.ProtoLens.Field.HasField "width" Proto.Tensorflow.Core.Framework.Summary.Summary'Image Proto.Tensorflow.Core.Framework.Summary.Summary'Image -instance Data.ProtoLens.Field.HasField "colorspace" Proto.Tensorflow.Core.Framework.Summary.Summary'Image 
Proto.Tensorflow.Core.Framework.Summary.Summary'Image -instance Data.ProtoLens.Field.HasField "encodedImageString" Proto.Tensorflow.Core.Framework.Summary.Summary'Image Proto.Tensorflow.Core.Framework.Summary.Summary'Image -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.Summary'Image -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.Summary'Image -instance Data.ProtoLens.Field.HasField "nodeName" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "tag" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "simpleValue" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "maybe'simpleValue" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "obsoleteOldStyleHistogram" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "maybe'obsoleteOldStyleHistogram" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "image" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "maybe'image" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "histo" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "maybe'histo" Proto.Tensorflow.Core.Framework.Summary.Summary'Value 
Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "audio" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "maybe'audio" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "tensor" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "maybe'tensor" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.Summary'Value -instance Data.ProtoLens.Field.HasField "typeHint" Proto.Tensorflow.Core.Framework.Summary.SummaryDescription Proto.Tensorflow.Core.Framework.Summary.SummaryDescription -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.SummaryDescription -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.SummaryDescription - -module Proto.Tensorflow.Core.Util.Event -data Event -Event :: !Double -> !Int64 -> !(Maybe Text) -> !(Maybe ByteString) -> !(Maybe Summary) -> !(Maybe LogMessage) -> !(Maybe SessionLog) -> !(Maybe TaggedRunMetadata) -> !(Maybe ByteString) -> Event -[_Event'wallTime] :: Event -> !Double -[_Event'step] :: Event -> !Int64 -[_Event'fileVersion] :: Event -> !(Maybe Text) -[_Event'graphDef] :: Event -> !(Maybe ByteString) -[_Event'summary] :: Event -> !(Maybe Summary) -[_Event'logMessage] :: Event -> !(Maybe LogMessage) -[_Event'sessionLog] :: Event -> !(Maybe SessionLog) -[_Event'taggedRunMetadata] :: Event -> !(Maybe TaggedRunMetadata) -[_Event'metaGraphDef] :: Event -> !(Maybe ByteString) -data LogMessage -LogMessage :: !LogMessage'Level -> !Text 
-> LogMessage -[_LogMessage'level] :: LogMessage -> !LogMessage'Level -[_LogMessage'message] :: LogMessage -> !Text -data LogMessage'Level -LogMessage'UNKNOWN :: LogMessage'Level -LogMessage'DEBUG :: LogMessage'Level -LogMessage'INFO :: LogMessage'Level -LogMessage'WARN :: LogMessage'Level -LogMessage'ERROR :: LogMessage'Level -LogMessage'FATAL :: LogMessage'Level -data SessionLog -SessionLog :: !SessionLog'SessionStatus -> !Text -> !Text -> SessionLog -[_SessionLog'status] :: SessionLog -> !SessionLog'SessionStatus -[_SessionLog'checkpointPath] :: SessionLog -> !Text -[_SessionLog'msg] :: SessionLog -> !Text -data SessionLog'SessionStatus -SessionLog'STATUS_UNSPECIFIED :: SessionLog'SessionStatus -SessionLog'START :: SessionLog'SessionStatus -SessionLog'STOP :: SessionLog'SessionStatus -SessionLog'CHECKPOINT :: SessionLog'SessionStatus -data TaggedRunMetadata -TaggedRunMetadata :: !Text -> !ByteString -> TaggedRunMetadata -[_TaggedRunMetadata'tag] :: TaggedRunMetadata -> !Text -[_TaggedRunMetadata'runMetadata] :: TaggedRunMetadata -> !ByteString -checkpointPath :: HasField "checkpointPath" msg msg' => Lens msg msg' (Field "checkpointPath" msg) (Field "checkpointPath" msg') -fileVersion :: HasField "fileVersion" msg msg' => Lens msg msg' (Field "fileVersion" msg) (Field "fileVersion" msg') -graphDef :: HasField "graphDef" msg msg' => Lens msg msg' (Field "graphDef" msg) (Field "graphDef" msg') -level :: HasField "level" msg msg' => Lens msg msg' (Field "level" msg) (Field "level" msg') -logMessage :: HasField "logMessage" msg msg' => Lens msg msg' (Field "logMessage" msg) (Field "logMessage" msg') -maybe'fileVersion :: HasField "maybe'fileVersion" msg msg' => Lens msg msg' (Field "maybe'fileVersion" msg) (Field "maybe'fileVersion" msg') -maybe'graphDef :: HasField "maybe'graphDef" msg msg' => Lens msg msg' (Field "maybe'graphDef" msg) (Field "maybe'graphDef" msg') -maybe'logMessage :: HasField "maybe'logMessage" msg msg' => Lens msg msg' (Field "maybe'logMessage" 
msg) (Field "maybe'logMessage" msg') -maybe'metaGraphDef :: HasField "maybe'metaGraphDef" msg msg' => Lens msg msg' (Field "maybe'metaGraphDef" msg) (Field "maybe'metaGraphDef" msg') -maybe'sessionLog :: HasField "maybe'sessionLog" msg msg' => Lens msg msg' (Field "maybe'sessionLog" msg) (Field "maybe'sessionLog" msg') -maybe'summary :: HasField "maybe'summary" msg msg' => Lens msg msg' (Field "maybe'summary" msg) (Field "maybe'summary" msg') -maybe'taggedRunMetadata :: HasField "maybe'taggedRunMetadata" msg msg' => Lens msg msg' (Field "maybe'taggedRunMetadata" msg) (Field "maybe'taggedRunMetadata" msg') -message :: HasField "message" msg msg' => Lens msg msg' (Field "message" msg) (Field "message" msg') -metaGraphDef :: HasField "metaGraphDef" msg msg' => Lens msg msg' (Field "metaGraphDef" msg) (Field "metaGraphDef" msg') -msg :: HasField "msg" msg msg' => Lens msg msg' (Field "msg" msg) (Field "msg" msg') -runMetadata :: HasField "runMetadata" msg msg' => Lens msg msg' (Field "runMetadata" msg) (Field "runMetadata" msg') -sessionLog :: HasField "sessionLog" msg msg' => Lens msg msg' (Field "sessionLog" msg) (Field "sessionLog" msg') -status :: HasField "status" msg msg' => Lens msg msg' (Field "status" msg) (Field "status" msg') -step :: HasField "step" msg msg' => Lens msg msg' (Field "step" msg) (Field "step" msg') -summary :: HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg') -tag :: HasField "tag" msg msg' => Lens msg msg' (Field "tag" msg) (Field "tag" msg') -taggedRunMetadata :: HasField "taggedRunMetadata" msg msg' => Lens msg msg' (Field "taggedRunMetadata" msg) (Field "taggedRunMetadata" msg') -wallTime :: HasField "wallTime" msg msg' => Lens msg msg' (Field "wallTime" msg) (Field "wallTime" msg') -instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.Event -instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.Event -instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata -instance 
GHC.Show.Show Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata -instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.SessionLog -instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.SessionLog -instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus -instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus -instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.LogMessage -instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.LogMessage -instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.LogMessage'Level -instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.LogMessage'Level -instance Data.ProtoLens.Field.HasField "wallTime" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "step" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "fileVersion" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "maybe'fileVersion" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "graphDef" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "maybe'graphDef" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "summary" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "maybe'summary" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "logMessage" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "maybe'logMessage" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance 
Data.ProtoLens.Field.HasField "sessionLog" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "maybe'sessionLog" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "taggedRunMetadata" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "maybe'taggedRunMetadata" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "metaGraphDef" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "maybe'metaGraphDef" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event -instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Util.Event.Event -instance Data.ProtoLens.Field.HasField "level" Proto.Tensorflow.Core.Util.Event.LogMessage Proto.Tensorflow.Core.Util.Event.LogMessage -instance Data.ProtoLens.Field.HasField "message" Proto.Tensorflow.Core.Util.Event.LogMessage Proto.Tensorflow.Core.Util.Event.LogMessage -instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.LogMessage -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Util.Event.LogMessage -instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.LogMessage'Level -instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Util.Event.LogMessage'Level -instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Util.Event.LogMessage'Level -instance GHC.Enum.Enum Proto.Tensorflow.Core.Util.Event.LogMessage'Level -instance GHC.Enum.Bounded Proto.Tensorflow.Core.Util.Event.LogMessage'Level -instance Data.ProtoLens.Field.HasField "status" Proto.Tensorflow.Core.Util.Event.SessionLog Proto.Tensorflow.Core.Util.Event.SessionLog -instance 
Data.ProtoLens.Field.HasField "checkpointPath" Proto.Tensorflow.Core.Util.Event.SessionLog Proto.Tensorflow.Core.Util.Event.SessionLog -instance Data.ProtoLens.Field.HasField "msg" Proto.Tensorflow.Core.Util.Event.SessionLog Proto.Tensorflow.Core.Util.Event.SessionLog -instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.SessionLog -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Util.Event.SessionLog -instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus -instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus -instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus -instance GHC.Enum.Enum Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus -instance GHC.Enum.Bounded Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus -instance Data.ProtoLens.Field.HasField "tag" Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata -instance Data.ProtoLens.Field.HasField "runMetadata" Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata -instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata - -module Proto.Tensorflow.Core.Framework.AttrValue -data AttrValue -AttrValue :: !(Maybe ByteString) -> !(Maybe Int64) -> !(Maybe Float) -> !(Maybe Bool) -> !(Maybe DataType) -> !(Maybe TensorShapeProto) -> !(Maybe TensorProto) -> !(Maybe AttrValue'ListValue) -> !(Maybe NameAttrList) -> !(Maybe Text) -> AttrValue -[_AttrValue's] :: AttrValue -> !(Maybe ByteString) -[_AttrValue'i] :: AttrValue -> !(Maybe Int64) -[_AttrValue'f] :: AttrValue -> !(Maybe Float) -[_AttrValue'b] :: AttrValue -> !(Maybe Bool) -[_AttrValue'type'] :: AttrValue -> !(Maybe DataType) -[_AttrValue'shape] :: AttrValue -> !(Maybe 
TensorShapeProto) -[_AttrValue'tensor] :: AttrValue -> !(Maybe TensorProto) -[_AttrValue'list] :: AttrValue -> !(Maybe AttrValue'ListValue) -[_AttrValue'func] :: AttrValue -> !(Maybe NameAttrList) -[_AttrValue'placeholder] :: AttrValue -> !(Maybe Text) -data AttrValue'ListValue -AttrValue'ListValue :: ![ByteString] -> ![Int64] -> ![Float] -> ![Bool] -> ![DataType] -> ![TensorShapeProto] -> ![TensorProto] -> ![NameAttrList] -> AttrValue'ListValue -[_AttrValue'ListValue's] :: AttrValue'ListValue -> ![ByteString] -[_AttrValue'ListValue'i] :: AttrValue'ListValue -> ![Int64] -[_AttrValue'ListValue'f] :: AttrValue'ListValue -> ![Float] -[_AttrValue'ListValue'b] :: AttrValue'ListValue -> ![Bool] -[_AttrValue'ListValue'type'] :: AttrValue'ListValue -> ![DataType] -[_AttrValue'ListValue'shape] :: AttrValue'ListValue -> ![TensorShapeProto] -[_AttrValue'ListValue'tensor] :: AttrValue'ListValue -> ![TensorProto] -[_AttrValue'ListValue'func] :: AttrValue'ListValue -> ![NameAttrList] -data NameAttrList -NameAttrList :: !Text -> !(Map Text AttrValue) -> NameAttrList -[_NameAttrList'name] :: NameAttrList -> !Text -[_NameAttrList'attr] :: NameAttrList -> !(Map Text AttrValue) -data NameAttrList'AttrEntry -NameAttrList'AttrEntry :: !Text -> !(Maybe AttrValue) -> NameAttrList'AttrEntry -[_NameAttrList'AttrEntry'key] :: NameAttrList'AttrEntry -> !Text -[_NameAttrList'AttrEntry'value] :: NameAttrList'AttrEntry -> !(Maybe AttrValue) -attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') -b :: HasField "b" msg msg' => Lens msg msg' (Field "b" msg) (Field "b" msg') -f :: HasField "f" msg msg' => Lens msg msg' (Field "f" msg) (Field "f" msg') -func :: HasField "func" msg msg' => Lens msg msg' (Field "func" msg) (Field "func" msg') -i :: HasField "i" msg msg' => Lens msg msg' (Field "i" msg) (Field "i" msg') -key :: HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') -list :: HasField "list" msg msg' => Lens msg msg' (Field "list" 
msg) (Field "list" msg') -maybe'b :: HasField "maybe'b" msg msg' => Lens msg msg' (Field "maybe'b" msg) (Field "maybe'b" msg') -maybe'f :: HasField "maybe'f" msg msg' => Lens msg msg' (Field "maybe'f" msg) (Field "maybe'f" msg') -maybe'func :: HasField "maybe'func" msg msg' => Lens msg msg' (Field "maybe'func" msg) (Field "maybe'func" msg') -maybe'i :: HasField "maybe'i" msg msg' => Lens msg msg' (Field "maybe'i" msg) (Field "maybe'i" msg') -maybe'list :: HasField "maybe'list" msg msg' => Lens msg msg' (Field "maybe'list" msg) (Field "maybe'list" msg') -maybe'placeholder :: HasField "maybe'placeholder" msg msg' => Lens msg msg' (Field "maybe'placeholder" msg) (Field "maybe'placeholder" msg') -maybe's :: HasField "maybe's" msg msg' => Lens msg msg' (Field "maybe's" msg) (Field "maybe's" msg') -maybe'shape :: HasField "maybe'shape" msg msg' => Lens msg msg' (Field "maybe'shape" msg) (Field "maybe'shape" msg') -maybe'tensor :: HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg') -maybe'type' :: HasField "maybe'type'" msg msg' => Lens msg msg' (Field "maybe'type'" msg) (Field "maybe'type'" msg') -maybe'value :: HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg') -name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') -placeholder :: HasField "placeholder" msg msg' => Lens msg msg' (Field "placeholder" msg) (Field "placeholder" msg') -s :: HasField "s" msg msg' => Lens msg msg' (Field "s" msg) (Field "s" msg') -shape :: HasField "shape" msg msg' => Lens msg msg' (Field "shape" msg) (Field "shape" msg') -tensor :: HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg') -type' :: HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg') -value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') -instance GHC.Classes.Eq 
Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList -instance Data.ProtoLens.Field.HasField "s" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe's" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "i" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe'i" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "f" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe'f" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "b" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe'b" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue 
-instance Data.ProtoLens.Field.HasField "maybe'type'" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "shape" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe'shape" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "tensor" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe'tensor" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "list" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe'list" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "func" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe'func" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "placeholder" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "maybe'placeholder" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValue.AttrValue -instance Data.ProtoLens.Field.HasField "s" 
Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Field.HasField "i" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Field.HasField "f" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Field.HasField "b" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Field.HasField "shape" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Field.HasField "tensor" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Field.HasField "func" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList -instance Data.ProtoLens.Field.HasField "attr" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList -instance Data.ProtoLens.Message.Message 
Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList -instance Data.ProtoLens.Field.HasField "key" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry -instance Data.ProtoLens.Field.HasField "value" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry -instance Data.ProtoLens.Field.HasField "maybe'value" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry - -module Proto.Tensorflow.Core.Framework.NodeDef -data NodeDef -NodeDef :: !Text -> !Text -> ![Text] -> !Text -> !(Map Text AttrValue) -> NodeDef -[_NodeDef'name] :: NodeDef -> !Text -[_NodeDef'op] :: NodeDef -> !Text -[_NodeDef'input] :: NodeDef -> ![Text] -[_NodeDef'device] :: NodeDef -> !Text -[_NodeDef'attr] :: NodeDef -> !(Map Text AttrValue) -data NodeDef'AttrEntry -NodeDef'AttrEntry :: !Text -> !(Maybe AttrValue) -> NodeDef'AttrEntry -[_NodeDef'AttrEntry'key] :: NodeDef'AttrEntry -> !Text -[_NodeDef'AttrEntry'value] :: NodeDef'AttrEntry -> !(Maybe AttrValue) -attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') -device :: HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg') -input :: HasField "input" msg msg' => Lens msg msg' (Field "input" msg) (Field "input" msg') -key :: HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') -maybe'value :: HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg') -name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') -op :: HasField "op" msg msg' => Lens msg msg' 
(Field "op" msg) (Field "op" msg') -value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance Data.ProtoLens.Field.HasField "op" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance Data.ProtoLens.Field.HasField "input" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance Data.ProtoLens.Field.HasField "device" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance Data.ProtoLens.Field.HasField "attr" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.NodeDef.NodeDef -instance Data.ProtoLens.Field.HasField "key" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry -instance Data.ProtoLens.Field.HasField "value" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry -instance Data.ProtoLens.Field.HasField "maybe'value" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry -instance Data.ProtoLens.Message.Message 
Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry - -module Proto.Tensorflow.Core.Framework.OpDef -data OpDef -OpDef :: !Text -> ![OpDef'ArgDef] -> ![OpDef'ArgDef] -> ![OpDef'AttrDef] -> !(Maybe OpDeprecation) -> !Text -> !Text -> !Bool -> !Bool -> !Bool -> !Bool -> OpDef -[_OpDef'name] :: OpDef -> !Text -[_OpDef'inputArg] :: OpDef -> ![OpDef'ArgDef] -[_OpDef'outputArg] :: OpDef -> ![OpDef'ArgDef] -[_OpDef'attr] :: OpDef -> ![OpDef'AttrDef] -[_OpDef'deprecation] :: OpDef -> !(Maybe OpDeprecation) -[_OpDef'summary] :: OpDef -> !Text -[_OpDef'description] :: OpDef -> !Text -[_OpDef'isCommutative] :: OpDef -> !Bool -[_OpDef'isAggregate] :: OpDef -> !Bool -[_OpDef'isStateful] :: OpDef -> !Bool -[_OpDef'allowsUninitializedInput] :: OpDef -> !Bool -data OpDef'ArgDef -OpDef'ArgDef :: !Text -> !Text -> !DataType -> !Text -> !Text -> !Text -> !Bool -> OpDef'ArgDef -[_OpDef'ArgDef'name] :: OpDef'ArgDef -> !Text -[_OpDef'ArgDef'description] :: OpDef'ArgDef -> !Text -[_OpDef'ArgDef'type'] :: OpDef'ArgDef -> !DataType -[_OpDef'ArgDef'typeAttr] :: OpDef'ArgDef -> !Text -[_OpDef'ArgDef'numberAttr] :: OpDef'ArgDef -> !Text -[_OpDef'ArgDef'typeListAttr] :: OpDef'ArgDef -> !Text -[_OpDef'ArgDef'isRef] :: OpDef'ArgDef -> !Bool -data OpDef'AttrDef -OpDef'AttrDef :: !Text -> !Text -> !(Maybe AttrValue) -> !Text -> !Bool -> !Int64 -> !(Maybe AttrValue) -> OpDef'AttrDef -[_OpDef'AttrDef'name] :: OpDef'AttrDef -> !Text -[_OpDef'AttrDef'type'] :: OpDef'AttrDef -> !Text -[_OpDef'AttrDef'defaultValue] :: OpDef'AttrDef -> !(Maybe AttrValue) -[_OpDef'AttrDef'description] :: OpDef'AttrDef -> !Text -[_OpDef'AttrDef'hasMinimum] :: OpDef'AttrDef -> !Bool -[_OpDef'AttrDef'minimum] :: OpDef'AttrDef -> !Int64 -[_OpDef'AttrDef'allowedValues] :: OpDef'AttrDef -> !(Maybe AttrValue) -data OpDeprecation -OpDeprecation :: !Int32 -> !Text -> OpDeprecation -[_OpDeprecation'version] :: OpDeprecation -> !Int32 -[_OpDeprecation'explanation] :: OpDeprecation -> !Text -data OpList -OpList :: ![OpDef] -> 
OpList -[_OpList'op] :: OpList -> ![OpDef] -allowedValues :: HasField "allowedValues" msg msg' => Lens msg msg' (Field "allowedValues" msg) (Field "allowedValues" msg') -allowsUninitializedInput :: HasField "allowsUninitializedInput" msg msg' => Lens msg msg' (Field "allowsUninitializedInput" msg) (Field "allowsUninitializedInput" msg') -attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') -defaultValue :: HasField "defaultValue" msg msg' => Lens msg msg' (Field "defaultValue" msg) (Field "defaultValue" msg') -deprecation :: HasField "deprecation" msg msg' => Lens msg msg' (Field "deprecation" msg) (Field "deprecation" msg') -description :: HasField "description" msg msg' => Lens msg msg' (Field "description" msg) (Field "description" msg') -explanation :: HasField "explanation" msg msg' => Lens msg msg' (Field "explanation" msg) (Field "explanation" msg') -hasMinimum :: HasField "hasMinimum" msg msg' => Lens msg msg' (Field "hasMinimum" msg) (Field "hasMinimum" msg') -inputArg :: HasField "inputArg" msg msg' => Lens msg msg' (Field "inputArg" msg) (Field "inputArg" msg') -isAggregate :: HasField "isAggregate" msg msg' => Lens msg msg' (Field "isAggregate" msg) (Field "isAggregate" msg') -isCommutative :: HasField "isCommutative" msg msg' => Lens msg msg' (Field "isCommutative" msg) (Field "isCommutative" msg') -isRef :: HasField "isRef" msg msg' => Lens msg msg' (Field "isRef" msg) (Field "isRef" msg') -isStateful :: HasField "isStateful" msg msg' => Lens msg msg' (Field "isStateful" msg) (Field "isStateful" msg') -maybe'allowedValues :: HasField "maybe'allowedValues" msg msg' => Lens msg msg' (Field "maybe'allowedValues" msg) (Field "maybe'allowedValues" msg') -maybe'defaultValue :: HasField "maybe'defaultValue" msg msg' => Lens msg msg' (Field "maybe'defaultValue" msg) (Field "maybe'defaultValue" msg') -maybe'deprecation :: HasField "maybe'deprecation" msg msg' => Lens msg msg' (Field "maybe'deprecation" msg) (Field 
"maybe'deprecation" msg') -minimum :: HasField "minimum" msg msg' => Lens msg msg' (Field "minimum" msg) (Field "minimum" msg') -name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') -numberAttr :: HasField "numberAttr" msg msg' => Lens msg msg' (Field "numberAttr" msg) (Field "numberAttr" msg') -op :: HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg') -outputArg :: HasField "outputArg" msg msg' => Lens msg msg' (Field "outputArg" msg) (Field "outputArg" msg') -summary :: HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg') -type' :: HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg') -typeAttr :: HasField "typeAttr" msg msg' => Lens msg msg' (Field "typeAttr" msg) (Field "typeAttr" msg') -typeListAttr :: HasField "typeListAttr" msg msg' => Lens msg msg' (Field "typeListAttr" msg) (Field "typeListAttr" msg') -version :: HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg') -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpList -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpList -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "inputArg" 
Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "outputArg" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "attr" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "deprecation" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "maybe'deprecation" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "summary" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "description" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "isCommutative" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "isAggregate" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "isStateful" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "allowsUninitializedInput" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpDef -instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.ProtoLens.Field.HasField "description" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance 
Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.ProtoLens.Field.HasField "typeAttr" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.ProtoLens.Field.HasField "numberAttr" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.ProtoLens.Field.HasField "typeListAttr" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.ProtoLens.Field.HasField "isRef" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef -instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "defaultValue" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "maybe'defaultValue" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "description" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "hasMinimum" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "minimum" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef 
Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "allowedValues" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "maybe'allowedValues" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef -instance Data.ProtoLens.Field.HasField "version" Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation -instance Data.ProtoLens.Field.HasField "explanation" Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation -instance Data.ProtoLens.Field.HasField "op" Proto.Tensorflow.Core.Framework.OpDef.OpList Proto.Tensorflow.Core.Framework.OpDef.OpList -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpList -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpList - -module Proto.Tensorflow.Core.Framework.Graph -data GraphDef -GraphDef :: ![NodeDef] -> !(Maybe VersionDef) -> !Int32 -> !(Maybe FunctionDefLibrary) -> GraphDef -[_GraphDef'node] :: GraphDef -> ![NodeDef] -[_GraphDef'versions] :: GraphDef -> !(Maybe VersionDef) -[_GraphDef'version] :: GraphDef -> !Int32 -[_GraphDef'library] :: GraphDef -> !(Maybe FunctionDefLibrary) -library :: HasField "library" msg msg' => Lens msg msg' (Field "library" msg) (Field "library" msg') -maybe'library :: HasField "maybe'library" msg msg' => Lens msg msg' (Field "maybe'library" msg) (Field "maybe'library" msg') -maybe'versions :: HasField 
"maybe'versions" msg msg' => Lens msg msg' (Field "maybe'versions" msg) (Field "maybe'versions" msg') -node :: HasField "node" msg msg' => Lens msg msg' (Field "node" msg) (Field "node" msg') -version :: HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg') -versions :: HasField "versions" msg msg' => Lens msg msg' (Field "versions" msg) (Field "versions" msg') -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance Data.ProtoLens.Field.HasField "node" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance Data.ProtoLens.Field.HasField "versions" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance Data.ProtoLens.Field.HasField "maybe'versions" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance Data.ProtoLens.Field.HasField "version" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance Data.ProtoLens.Field.HasField "library" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance Data.ProtoLens.Field.HasField "maybe'library" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Graph.GraphDef -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Graph.GraphDef - -module Proto.Tensorflow.Core.Protobuf.Config -data ConfigProto -ConfigProto :: !(Map Text Int32) -> !Int32 -> !Int32 -> !Bool -> ![ThreadPoolOptionProto] -> !Int32 -> ![Text] -> !(Maybe GPUOptions) -> !Bool -> !Bool -> !(Maybe GraphOptions) -> !Int64 -> !(Maybe RPCOptions) -> ConfigProto -[_ConfigProto'deviceCount] :: ConfigProto -> !(Map Text Int32) -[_ConfigProto'intraOpParallelismThreads] :: 
ConfigProto -> !Int32 -[_ConfigProto'interOpParallelismThreads] :: ConfigProto -> !Int32 -[_ConfigProto'usePerSessionThreads] :: ConfigProto -> !Bool -[_ConfigProto'sessionInterOpThreadPool] :: ConfigProto -> ![ThreadPoolOptionProto] -[_ConfigProto'placementPeriod] :: ConfigProto -> !Int32 -[_ConfigProto'deviceFilters] :: ConfigProto -> ![Text] -[_ConfigProto'gpuOptions] :: ConfigProto -> !(Maybe GPUOptions) -[_ConfigProto'allowSoftPlacement] :: ConfigProto -> !Bool -[_ConfigProto'logDevicePlacement] :: ConfigProto -> !Bool -[_ConfigProto'graphOptions] :: ConfigProto -> !(Maybe GraphOptions) -[_ConfigProto'operationTimeoutInMs] :: ConfigProto -> !Int64 -[_ConfigProto'rpcOptions] :: ConfigProto -> !(Maybe RPCOptions) -data ConfigProto'DeviceCountEntry -ConfigProto'DeviceCountEntry :: !Text -> !Int32 -> ConfigProto'DeviceCountEntry -[_ConfigProto'DeviceCountEntry'key] :: ConfigProto'DeviceCountEntry -> !Text -[_ConfigProto'DeviceCountEntry'value] :: ConfigProto'DeviceCountEntry -> !Int32 -data GPUOptions -GPUOptions :: !Double -> !Text -> !Int64 -> !Bool -> !Text -> GPUOptions -[_GPUOptions'perProcessGpuMemoryFraction] :: GPUOptions -> !Double -[_GPUOptions'allocatorType] :: GPUOptions -> !Text -[_GPUOptions'deferredDeletionBytes] :: GPUOptions -> !Int64 -[_GPUOptions'allowGrowth] :: GPUOptions -> !Bool -[_GPUOptions'visibleDeviceList] :: GPUOptions -> !Text -data GraphOptions -GraphOptions :: !Bool -> !(Maybe OptimizerOptions) -> !Int64 -> !Int64 -> !Bool -> !Bool -> !Bool -> !Int32 -> GraphOptions -[_GraphOptions'enableRecvScheduling] :: GraphOptions -> !Bool -[_GraphOptions'optimizerOptions] :: GraphOptions -> !(Maybe OptimizerOptions) -[_GraphOptions'buildCostModel] :: GraphOptions -> !Int64 -[_GraphOptions'buildCostModelAfter] :: GraphOptions -> !Int64 -[_GraphOptions'inferShapes] :: GraphOptions -> !Bool -[_GraphOptions'placePrunedGraph] :: GraphOptions -> !Bool -[_GraphOptions'enableBfloat16Sendrecv] :: GraphOptions -> !Bool -[_GraphOptions'timelineStep] :: 
GraphOptions -> !Int32 -data OptimizerOptions -OptimizerOptions :: !Bool -> !Bool -> !Bool -> !OptimizerOptions'Level -> !OptimizerOptions'GlobalJitLevel -> OptimizerOptions -[_OptimizerOptions'doCommonSubexpressionElimination] :: OptimizerOptions -> !Bool -[_OptimizerOptions'doConstantFolding] :: OptimizerOptions -> !Bool -[_OptimizerOptions'doFunctionInlining] :: OptimizerOptions -> !Bool -[_OptimizerOptions'optLevel] :: OptimizerOptions -> !OptimizerOptions'Level -[_OptimizerOptions'globalJitLevel] :: OptimizerOptions -> !OptimizerOptions'GlobalJitLevel -data OptimizerOptions'GlobalJitLevel -OptimizerOptions'OFF :: OptimizerOptions'GlobalJitLevel -OptimizerOptions'DEFAULT :: OptimizerOptions'GlobalJitLevel -OptimizerOptions'ON_1 :: OptimizerOptions'GlobalJitLevel -OptimizerOptions'ON_2 :: OptimizerOptions'GlobalJitLevel -data OptimizerOptions'Level -OptimizerOptions'L0 :: OptimizerOptions'Level -OptimizerOptions'L1 :: OptimizerOptions'Level -data RPCOptions -RPCOptions :: !Bool -> RPCOptions -[_RPCOptions'useRpcForInprocessMaster] :: RPCOptions -> !Bool -data RunMetadata -RunMetadata :: !(Maybe StepStats) -> !(Maybe CostGraphDef) -> ![GraphDef] -> RunMetadata -[_RunMetadata'stepStats] :: RunMetadata -> !(Maybe StepStats) -[_RunMetadata'costGraph] :: RunMetadata -> !(Maybe CostGraphDef) -[_RunMetadata'partitionGraphs] :: RunMetadata -> ![GraphDef] -data RunOptions -RunOptions :: !RunOptions'TraceLevel -> !Int64 -> !Int32 -> !Bool -> !(Maybe DebugOptions) -> RunOptions -[_RunOptions'traceLevel] :: RunOptions -> !RunOptions'TraceLevel -[_RunOptions'timeoutInMs] :: RunOptions -> !Int64 -[_RunOptions'interOpThreadPool] :: RunOptions -> !Int32 -[_RunOptions'outputPartitionGraphs] :: RunOptions -> !Bool -[_RunOptions'debugOptions] :: RunOptions -> !(Maybe DebugOptions) -data RunOptions'TraceLevel -RunOptions'NO_TRACE :: RunOptions'TraceLevel -RunOptions'SOFTWARE_TRACE :: RunOptions'TraceLevel -RunOptions'HARDWARE_TRACE :: RunOptions'TraceLevel -RunOptions'FULL_TRACE :: 
RunOptions'TraceLevel -data ThreadPoolOptionProto -ThreadPoolOptionProto :: !Int32 -> ThreadPoolOptionProto -[_ThreadPoolOptionProto'numThreads] :: ThreadPoolOptionProto -> !Int32 -allocatorType :: HasField "allocatorType" msg msg' => Lens msg msg' (Field "allocatorType" msg) (Field "allocatorType" msg') -allowGrowth :: HasField "allowGrowth" msg msg' => Lens msg msg' (Field "allowGrowth" msg) (Field "allowGrowth" msg') -allowSoftPlacement :: HasField "allowSoftPlacement" msg msg' => Lens msg msg' (Field "allowSoftPlacement" msg) (Field "allowSoftPlacement" msg') -buildCostModel :: HasField "buildCostModel" msg msg' => Lens msg msg' (Field "buildCostModel" msg) (Field "buildCostModel" msg') -buildCostModelAfter :: HasField "buildCostModelAfter" msg msg' => Lens msg msg' (Field "buildCostModelAfter" msg) (Field "buildCostModelAfter" msg') -costGraph :: HasField "costGraph" msg msg' => Lens msg msg' (Field "costGraph" msg) (Field "costGraph" msg') -debugOptions :: HasField "debugOptions" msg msg' => Lens msg msg' (Field "debugOptions" msg) (Field "debugOptions" msg') -deferredDeletionBytes :: HasField "deferredDeletionBytes" msg msg' => Lens msg msg' (Field "deferredDeletionBytes" msg) (Field "deferredDeletionBytes" msg') -deviceCount :: HasField "deviceCount" msg msg' => Lens msg msg' (Field "deviceCount" msg) (Field "deviceCount" msg') -deviceFilters :: HasField "deviceFilters" msg msg' => Lens msg msg' (Field "deviceFilters" msg) (Field "deviceFilters" msg') -doCommonSubexpressionElimination :: HasField "doCommonSubexpressionElimination" msg msg' => Lens msg msg' (Field "doCommonSubexpressionElimination" msg) (Field "doCommonSubexpressionElimination" msg') -doConstantFolding :: HasField "doConstantFolding" msg msg' => Lens msg msg' (Field "doConstantFolding" msg) (Field "doConstantFolding" msg') -doFunctionInlining :: HasField "doFunctionInlining" msg msg' => Lens msg msg' (Field "doFunctionInlining" msg) (Field "doFunctionInlining" msg') -enableBfloat16Sendrecv 
:: HasField "enableBfloat16Sendrecv" msg msg' => Lens msg msg' (Field "enableBfloat16Sendrecv" msg) (Field "enableBfloat16Sendrecv" msg') -enableRecvScheduling :: HasField "enableRecvScheduling" msg msg' => Lens msg msg' (Field "enableRecvScheduling" msg) (Field "enableRecvScheduling" msg') -globalJitLevel :: HasField "globalJitLevel" msg msg' => Lens msg msg' (Field "globalJitLevel" msg) (Field "globalJitLevel" msg') -gpuOptions :: HasField "gpuOptions" msg msg' => Lens msg msg' (Field "gpuOptions" msg) (Field "gpuOptions" msg') -graphOptions :: HasField "graphOptions" msg msg' => Lens msg msg' (Field "graphOptions" msg) (Field "graphOptions" msg') -inferShapes :: HasField "inferShapes" msg msg' => Lens msg msg' (Field "inferShapes" msg) (Field "inferShapes" msg') -interOpParallelismThreads :: HasField "interOpParallelismThreads" msg msg' => Lens msg msg' (Field "interOpParallelismThreads" msg) (Field "interOpParallelismThreads" msg') -interOpThreadPool :: HasField "interOpThreadPool" msg msg' => Lens msg msg' (Field "interOpThreadPool" msg) (Field "interOpThreadPool" msg') -intraOpParallelismThreads :: HasField "intraOpParallelismThreads" msg msg' => Lens msg msg' (Field "intraOpParallelismThreads" msg) (Field "intraOpParallelismThreads" msg') -key :: HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') -logDevicePlacement :: HasField "logDevicePlacement" msg msg' => Lens msg msg' (Field "logDevicePlacement" msg) (Field "logDevicePlacement" msg') -maybe'costGraph :: HasField "maybe'costGraph" msg msg' => Lens msg msg' (Field "maybe'costGraph" msg) (Field "maybe'costGraph" msg') -maybe'debugOptions :: HasField "maybe'debugOptions" msg msg' => Lens msg msg' (Field "maybe'debugOptions" msg) (Field "maybe'debugOptions" msg') -maybe'gpuOptions :: HasField "maybe'gpuOptions" msg msg' => Lens msg msg' (Field "maybe'gpuOptions" msg) (Field "maybe'gpuOptions" msg') -maybe'graphOptions :: HasField "maybe'graphOptions" msg msg' => Lens msg msg' 
(Field "maybe'graphOptions" msg) (Field "maybe'graphOptions" msg') -maybe'optimizerOptions :: HasField "maybe'optimizerOptions" msg msg' => Lens msg msg' (Field "maybe'optimizerOptions" msg) (Field "maybe'optimizerOptions" msg') -maybe'rpcOptions :: HasField "maybe'rpcOptions" msg msg' => Lens msg msg' (Field "maybe'rpcOptions" msg) (Field "maybe'rpcOptions" msg') -maybe'stepStats :: HasField "maybe'stepStats" msg msg' => Lens msg msg' (Field "maybe'stepStats" msg) (Field "maybe'stepStats" msg') -numThreads :: HasField "numThreads" msg msg' => Lens msg msg' (Field "numThreads" msg) (Field "numThreads" msg') -operationTimeoutInMs :: HasField "operationTimeoutInMs" msg msg' => Lens msg msg' (Field "operationTimeoutInMs" msg) (Field "operationTimeoutInMs" msg') -optLevel :: HasField "optLevel" msg msg' => Lens msg msg' (Field "optLevel" msg) (Field "optLevel" msg') -optimizerOptions :: HasField "optimizerOptions" msg msg' => Lens msg msg' (Field "optimizerOptions" msg) (Field "optimizerOptions" msg') -outputPartitionGraphs :: HasField "outputPartitionGraphs" msg msg' => Lens msg msg' (Field "outputPartitionGraphs" msg) (Field "outputPartitionGraphs" msg') -partitionGraphs :: HasField "partitionGraphs" msg msg' => Lens msg msg' (Field "partitionGraphs" msg) (Field "partitionGraphs" msg') -perProcessGpuMemoryFraction :: HasField "perProcessGpuMemoryFraction" msg msg' => Lens msg msg' (Field "perProcessGpuMemoryFraction" msg) (Field "perProcessGpuMemoryFraction" msg') -placePrunedGraph :: HasField "placePrunedGraph" msg msg' => Lens msg msg' (Field "placePrunedGraph" msg) (Field "placePrunedGraph" msg') -placementPeriod :: HasField "placementPeriod" msg msg' => Lens msg msg' (Field "placementPeriod" msg) (Field "placementPeriod" msg') -rpcOptions :: HasField "rpcOptions" msg msg' => Lens msg msg' (Field "rpcOptions" msg) (Field "rpcOptions" msg') -sessionInterOpThreadPool :: HasField "sessionInterOpThreadPool" msg msg' => Lens msg msg' (Field "sessionInterOpThreadPool" 
msg) (Field "sessionInterOpThreadPool" msg') -stepStats :: HasField "stepStats" msg msg' => Lens msg msg' (Field "stepStats" msg) (Field "stepStats" msg') -timelineStep :: HasField "timelineStep" msg msg' => Lens msg msg' (Field "timelineStep" msg) (Field "timelineStep" msg') -timeoutInMs :: HasField "timeoutInMs" msg msg' => Lens msg msg' (Field "timeoutInMs" msg) (Field "timeoutInMs" msg') -traceLevel :: HasField "traceLevel" msg msg' => Lens msg msg' (Field "traceLevel" msg) (Field "traceLevel" msg') -usePerSessionThreads :: HasField "usePerSessionThreads" msg msg' => Lens msg msg' (Field "usePerSessionThreads" msg) (Field "usePerSessionThreads" msg') -useRpcForInprocessMaster :: HasField "useRpcForInprocessMaster" msg msg' => Lens msg msg' (Field "useRpcForInprocessMaster" msg) (Field "useRpcForInprocessMaster" msg') -value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') -visibleDeviceList :: HasField "visibleDeviceList" msg msg' => Lens msg msg' (Field "visibleDeviceList" msg) (Field "visibleDeviceList" msg') -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RPCOptions -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RPCOptions 
-instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry -instance Data.ProtoLens.Field.HasField "deviceCount" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "intraOpParallelismThreads" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "interOpParallelismThreads" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "usePerSessionThreads" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "sessionInterOpThreadPool" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "placementPeriod" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance 
Data.ProtoLens.Field.HasField "deviceFilters" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "gpuOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "maybe'gpuOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "allowSoftPlacement" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "logDevicePlacement" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "graphOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "maybe'graphOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "operationTimeoutInMs" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "rpcOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "maybe'rpcOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.ConfigProto -instance Data.ProtoLens.Field.HasField "key" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry -instance Data.ProtoLens.Field.HasField "value" 
Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry -instance Data.ProtoLens.Field.HasField "perProcessGpuMemoryFraction" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance Data.ProtoLens.Field.HasField "allocatorType" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance Data.ProtoLens.Field.HasField "deferredDeletionBytes" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance Data.ProtoLens.Field.HasField "allowGrowth" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance Data.ProtoLens.Field.HasField "visibleDeviceList" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance Data.ProtoLens.Field.HasField "enableRecvScheduling" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Field.HasField "optimizerOptions" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Field.HasField "maybe'optimizerOptions" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Field.HasField "buildCostModel" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance 
Data.ProtoLens.Field.HasField "buildCostModelAfter" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Field.HasField "inferShapes" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Field.HasField "placePrunedGraph" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Field.HasField "enableBfloat16Sendrecv" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Field.HasField "timelineStep" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.GraphOptions -instance Data.ProtoLens.Field.HasField "doCommonSubexpressionElimination" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance Data.ProtoLens.Field.HasField "doConstantFolding" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance Data.ProtoLens.Field.HasField "doFunctionInlining" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance Data.ProtoLens.Field.HasField "optLevel" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance Data.ProtoLens.Field.HasField "globalJitLevel" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance Data.ProtoLens.Message.Message 
Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel -instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel -instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel -instance GHC.Enum.Enum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel -instance GHC.Enum.Bounded Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level -instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level -instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level -instance GHC.Enum.Enum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level -instance GHC.Enum.Bounded Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level -instance Data.ProtoLens.Field.HasField "useRpcForInprocessMaster" Proto.Tensorflow.Core.Protobuf.Config.RPCOptions Proto.Tensorflow.Core.Protobuf.Config.RPCOptions -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RPCOptions -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.RPCOptions -instance Data.ProtoLens.Field.HasField "stepStats" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance Data.ProtoLens.Field.HasField "maybe'stepStats" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance Data.ProtoLens.Field.HasField "costGraph" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance Data.ProtoLens.Field.HasField "maybe'costGraph" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata 
Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance Data.ProtoLens.Field.HasField "partitionGraphs" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.RunMetadata -instance Data.ProtoLens.Field.HasField "traceLevel" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.ProtoLens.Field.HasField "timeoutInMs" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.ProtoLens.Field.HasField "interOpThreadPool" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.ProtoLens.Field.HasField "outputPartitionGraphs" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.ProtoLens.Field.HasField "debugOptions" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.ProtoLens.Field.HasField "maybe'debugOptions" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel -instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel -instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel -instance GHC.Enum.Enum Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel -instance GHC.Enum.Bounded Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel -instance 
Data.ProtoLens.Field.HasField "numThreads" Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html b/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html deleted file mode 100644 index ab9d4e3..0000000 --- a/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html +++ /dev/null @@ -1,8 +0,0 @@ -TensorFlow.Queue

                              tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Queue

                              Description

                              Queues in TensorFlow graph. Very limited support for now.

                              Synopsis

                              Documentation

                              data Queue as

                              A queue carrying tuples.

                              makeQueue

                              Arguments

                              :: (MonadBuild m, TensorTypes as) 
                              => Int64

                              The upper bound on the number of elements in - this queue. Negative numbers mean no limit.

                              -> ByteString

                              If non-empty, this queue will be shared - under the given name across multiple sessions.

                              -> m (Queue as) 

                              Creates a new queue with the given capacity and shared name.

                              enqueue :: forall as v m. (MonadBuild m, TensorTypes as) => Queue as -> TensorList v as -> m ControlNode

                              Adds the given values to the queue.

                              dequeue

                              Arguments

                              :: (MonadBuild m, TensorTypes as) 
                              => Queue as 
                              -> m (TensorList Value as)

                              Dequeued tensors. They are coupled in a sense - that values appear together, even if they are - not consumed together.

                              Retrieves the values from the queue.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html deleted file mode 100644 index a841881..0000000 --- a/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues. (Index)

                              tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/frames.html b/docs/haddock/tensorflow-queue-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-queue-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-queue-0.1.0.0/hslogo-16.png deleted file mode 100644 index 0ff8579fbd897417b0d6dad6e920f8882138a7c0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1684 zcmV;F25b3=P)4Tx0C)j~RL^S@K@|QrZmG~B2wH0nvUrdpNm;9CMbtL^5n^i$+aIn^?(HA4aZWV5ov6ELTdbo0FI&wK{O>*+w4vx20?>!`FrQsdJlnHR>OPy zcd~b_n$otK2Za4V;76L-DzNVtaSB-y0*E}{p()372;bw_^6ZZ}PI-92wGS&j#91PI zKs7DSe@(bk%_Y-7gGe}(^>I=@oY#w#*Bu9GZf3^F5WP>3rn}7Ut74&?PWBFvy`A)a zPP5)V!Xd&78LdA?xQ(9mjMYElVd13a#D+Z_7&Y|xU=_C-srWU*6kiZcC!$nw*)9$7 zn6CX+@=AhmkT}X@VSsa5NKe;HZuq)~1$`#h6R+ZTR#D-3j}vF!)ZOnz+5)dI4jl{{ z44Mr{P!L4~VVJN`K!!XTF*LGrKO?IK8z<8w`3e3jI8lUGNUta*C8 zn(P`s>{pjD=7Kek#B;Fw@hxAK%$F&Q6vg9J^Xf~4by_hu-=A!MJ3Znq&n~srbFGPs zH&&aMXZ>nO`|hf|ljc?VPhR!${AbO?W8x_>CU%PFA&Hm8F7cAsOREdwU~R_;ot1_u z(ruCYB-LPGn!NQdT|ZlRy+(fw^-+`=%+gee_kY4FWHg<*4sZI8+sFJD270UUORdLHO0nA4V) z%{fwsET5CQ>B?eK%uw4yQc~9?*JVo2}ze(;aRcp*ceL#HUJSllrgm5wQKR zQu+C;QrUh^8rFfA`ftFz{YAidi-`aL010qNS#tmY4c7nw4c7reD4Tcy00T@(L_t(I z5sj2vNEA^R$7gqDc6T=2^@fUA2(c`MltuL5<|KW>RWz$&YbU@|M|{$E*8Tu-Ux!w z1Y*Dr&Ubfr&v-nZaaB{3ilRumrjPmk{sZvQEWlW+{o~IH|8)=s6c#X9S5s5d%J z4@)&QH5|xQY-)^L1n0pTRu0Lx9`08YTjTwn^6 z0;b1+aQ@)n;Em$q;=7BBi)v0zj&o^g>0Whp^_^5IbxIUP8C@y9;R?*Ouu}rmfxbU= zwtWVNke-m!=`7bYEhWpcI5#)9qp`8E0lr6IQ)ARL3Ui}Af@grj8aN1=r>Cb+prlzO zNfJs*N_tUm2ZL%5* zPmL2??da$TR904gL(VDAQ-Fv_Dk}Pdw*4T(%*f4MKLRg=4ekMjhe2mW zMFsBwg%ftWT}0kxRaIk1k7qJ8*#cKB;Ft{i`zVIs-Nqge;!!Ld7#O&Qqu7e0sJmP) z$MW*>L$vSB&dxp@iA3U9fo)-7!Czlr{|o7Hv{1oyg3xsu%gn@(b1>$;SM-ZaQ`HV=V0s;lr%d8bd;xY zGwNvm3=Iu=tyXIgtJnf@A(2S@M140N 
ew{UA~tMxaJq;$xaSSi*30000tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues. \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/index.html b/docs/haddock/tensorflow-queue-0.1.0.0/index.html deleted file mode 100644 index a905134..0000000 --- a/docs/haddock/tensorflow-queue-0.1.0.0/index.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

                              tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

                              tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

                              Please see README.md

                              Modules

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/minus.gif b/docs/haddock/tensorflow-queue-0.1.0.0/minus.gif deleted file mode 100644 index 1deac2fe1a42e35b994f1b855488f392c50f6a89..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 56 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-queue-0.1.0.0/synopsis.png deleted file mode 100644 index 85fb86ec84907bcc86531dc82871948ff4d471fa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11327 zcmV-FEWp!=P)4Tx0C)k_S!GyNTeqHT_l8Y(cXyX`gGi?cY`Qxn1VID|MJXwjPC)?)F$h6K zMMOd+6hs7sqbPzXbr*U(-*=zy-hcPcUC*=TdiNM(jyd-lv&OpsU|J&v2m2!^0SE{T z54F(O;E2!K(!rTCW z%wV;vdzf1QjBf#e&~gh74F>?Z4a=WLg$KhJ^$5nap>PLbJadS>e&h8+?D`9%QNL`g zEVKbYGXj7k5Q(8)0Fd#*a?VIMFW3*64geVHKzE-&0BG!BtmfuTbO(T`0Jaeg2nagF z{V*1E{Wm{e|AvV~*MEExiC+KU-~R=!2{)|c6Bg`GjQ;iG|FQ`1kAUCTuZtQk34#8{ z4r4(3g7#|{=Z@d+d#}7f!3C=>=26vx*jwA8>@MS>RG@Tt_zt3hie^T z_?0%9VUd=)Fos7I z^ghPh%Jy%YZ|)vCf6EaFPai$Q-!=$ppK!y&wrJs)bNdAuANB!m3n34Tfj{s75g-&U z1A!Pg3bcXF-=!Gv1VmU93G2duANT;{0JugFTqg*|oPXPC|A$2HS3NJd-hcPV3EW`Y zh=1Dr-5Mv{<{zIvz#Ybay&^Vcn^E_`qRfl{{bzYkp)4~$~NAx_VB;E z{?P)PU)DbV{Qi#~0H0@T9czDj06@6MNq8OrpdAz(9qQxd9nPr<&s+~tPQySqaZyfb zNh!%g_5YjeaLxMN*$sv_p;d%b#U$Wpz0Geb0U>E+EOsEQ;I!&= zNC6q(BFFWohy&t- zL?CHM5mJM6p`(xmWDmJOUQi$u0mVUQpbRJ*DuT+OI;a`C4fR4p&?xj8nuk`Puh35f z55*JWF{C0=8)=GkKzbrWk@3iMWInPS*@Wyu4kE{pbI3L14-^JPgW^Pq!Q<2bWsPz} zg`nb5nW!REEvg;Wj~YYGqt;RTXfiY_S_G|(HbmQ@z0gtU6m&ki8r_B-Ku@3-(OVb{ zh8`n;QNS2r>@mKWSWG773g!l;2Q!LUz-(f%SSG9pRuyZCC1S&|DcC~nb!<2G1$Gg; zjU&Zz;G}VSI0sxHE(w>9tH<5Py}&KucJP#VKD;vC6z`6Y#%JLx@m=^4{33pbgo;Ff zM3uyf#Fr$Iq=2M}WPoIbWP_BHl$%tE)ST3Z^fYM!=}po{r1PXd2-E~&f;PdC5J9*= zs3G(aUK2LR$jJD~G{_vt!pSa>)sa0QdqcKOPD3tEZbLrbsZB|wjHfK7yiNI%a+8XNN{Y&qDu61Js-9|yYMB~K%}=dM z?M|IcT|xbTdVvN>!$YG@<3@9arjllWW|0;{D?n>V>r0zK+erJ2cAbuzPL|Gw?j&6? 
z-95TFdL%tRy&=6neHMKS{UrTQ1~vvw1`mcbh9-s=4Br`97&RC@7}FVVFitT3Wa4Df zW%6UX#MHqw%Zy?cW;SPzV!p~ez`Vvn%c8>K#*)s`!ZO8*U=?PyV2x$1V13HE$;Qs6 z&lb#9$o7D3jh&udgWZ=sm;FBb3I`2`8ix-@E=M=VM@~9UO-_H#0?vNUbuLye1Fi_J zGOlM_JKO@?*4#+T3Fgmx>$N#hD=6JCPAiC=8LR|tcUDX*;jHjawc-Aa(!}p@(S{y z@=fw93cLy~3MC3J6=@aC6f+ecDWR3LloFKgD*aHFR}NQhQU0tVrsAhkud;kZ;E2bO z$|DP^+^R&?GSxXXPBj;`QnfjCE_I@Mx%xW|9u0SmYKzbdmB(*}d+O)oF zD{G(9?$JT&=D|u+DJZ zNWtioQNJ<4*wVPj_}x+AqoGH;Ob{kUCOIZE$M}u~9_ug#riP|Drn6=OW+7&G%rWL> z=Ede8ETk;rECwxUES)XuEw`++tg@`8tp%+ktov*zY#eRsY`)v-*k;?#*-6-)vU_6B zZ0}>=>40^xaj16KJg$2@@A#sloMVdPRon; zro?jMrmLZAiR-$Xw%cX5Rd)^dT=x|ZRgY|sB~Mk)Y|mvcRj(Yc6>oL#eD5_MZJ#2a zFTMu8*L=VGnflfE9r)Y&-w413xCGn|qz?28>kOxb4~I`91S8Hy%txw47DsMJ*+jLTq&gXR@@ceibXxRMj9yGtEGpJ5wl9t= zE-`NYl;)|jcqraAzAu3%Avt03wEpSZM3O|m#Ni~#r0k?`XKc@OC9@@;PF^^xf3_io zJS8;cWvWW*wR5O*KIfjL$)pvg?Wen^KhBWM$j{i#bjy5vUg~_o`GX6d7oKIwXI;IB zxfpnH@{;j<`HmaI~Pakhkz+;ck(4 z(L}LU@r@GJlC+ZVSKP0>xT6f*a^OxsWU@9UjK2+LN4pu2v z)m1ZBXH@Ui1lG*eTGaN}Db&@~v({%dAQ~bXR<1ijt)TYR@l+GyI++oAU8_Vo_$j=4_z&e7XOxBI$Oy4voD->JFFb+`B) z-My^)B=?i=A9TlbZ}tTDto3^JF7!F~O+T=EFy3$8|7^f`;L$_9hYtod2fH7sKDs-k zJaqf9;^U4d@=w~I$~|oxmK$z+CjYE`L}8@!xzh8l(IcbxU#P$69n%?mIBq!pWa8Mw z=%n@JtCx;1=U%zLT7K>S`pZ=0)Xwzj8T3s0Eahze8`d}FZ-w68n3JEoH?K4Q^qu9q z=>@li)%RiVcNddCkbTHs;#jI%mR`QQqPOz=CgGy+9whdp4g`BLCvp!8U&;uov(!a2t+bEnRv6HXyi9t`-YglcEo`$K zI8GTZXYLH1F5YE+b^&9-c%dfYc~N>X1MygiCdpZ8N*OKLV7W5+5rusvVP$KTgd_E; zV`@J%*flk^Jhjj1)aX9cTQC5ItVZ(2W=FkE;*aH-)|+*kk6SET?pjmWaNEk+>D${o z_#cmV%sNr-bj$gX%QW$m8{|&wA?SI;%go!uC))SCU%7vKz~jI-L0?1Ap^RZ7;i?hG zB3+__P9{WW#uUa@#oavB8Q+`m==5;nXwvwZiR6j1<0+%5!{;8Q^`_s>XwIxTUvlAM z)|rdpmprp=bM$iM@_6#8@((Vr7Q8HcP;{fXs3iGH;8nY8TBRaov}JqcixtC_ZBw07?YBCLI#1vB=rX<|d6)j~ z?!9;SA9XkN4rDD83J6N{$`!z{xG&lW}=KCd6md=WHe zF)la3F!5t@`sLkMS6?Sg5vR3gcxTbGOK%>(y*_twKH{Cjg64anMViI^4{J-a%g0=3|@n*5+(H4=G;Z`Bm z0XDw2UUnY#t`5ZG&WObDFO_)C zCe0{aEki1k_dNXt+=U-mA1_W_8p^(%Qj|@Mb z9sM+h7-yIepVWIvd=>Y)XzKR#)XeT1jH zI8-@&65hs?W6g0$Tn9b?K9MevmJ{6JljSOT6GbGYHWfM5G<6M41g#z&E8Qx6H$yI? 
z50eHn6Z1ODBi1suSavH8F-{EUJXaTYHjh8AJ|73)7XPq7gt>OirQ5IDz)!g7S$y<#pnvPn` zTCcP(>sag3>W=B<=vx}l7>pa{8`&AN7|$LpGx0noeC)GnyV)so9SefRgyl6WA8Q%w zeVfO&`F8I1(hk7k+3~B6fhW|RD4pIpx4EPekGo2^q1>k2n?25Xx_BviQ+coYJoGK~ zi}SY&kPV~?{2VkK+z^r;>Jw%VE)ao-y@)AN%A4?QY z!X(X~xtpASHaNvFl_z!g+(cSqdP;^mD`$^mG5`i zpn$&+Rk%>pUtCp^dd2Um*){o6wlZ|t=klqF!OHfk>gs};%-W>7nEHr@(CeX%5lwM7 zQg7xp*S7SwzHLLbOLn+*Uc0?`NAB*$d)wWCJsW)~{h|X4gV%@BpPU*_8L1qd8t0!( zdySmVd!st{bK%K{=9Rj&=Ffv)KX1|hFxkC)82{hg(&3(fkq6-NB>?O?0kGBtAd?QJ zm0$~|LIBLj0I*U5i1iA9XzK$|?dCuG2lOlFq=GX}9v}f{nuc(O=>uZH1yBw;!3bD_ zU{(i`gLA_m=mOLPjX+-zbO8W#QsA+O&>1m7Uxak_`<>>nu%o*kx!T2DqomQ{`*59GHMHWa@qZ7S~^!Kl)z@vEz7SZjuAWovinywxMoS2FN7 zEH|1t%4A}H?2754xrD_j%Moi{n>gE7_6iP##}7_;J59Lg5Ifz(-D^B~y{dc!eQ)?H z1`GsQ2d{)Cgfm98MOmHv9&;s5@6?xs(nO0hxa6LcxN|CLdl`M_GqP+i31t7w9nHU9 zkY40hVt!S*RG^%pl2DDR1@+)Ms)_U_Lks^c#r9*J-d)LeEAIFAEIl9{kQ}rbihXiz zxOZfJbZ?wtQtXx5l+ld&8>=~scSi5kK8P(dtn9DO{nh=s_)Emb(M`^+uiKA)7VrA) zEB#tO5ODlSVZM$P@WWh#2Fx+Iz|6u~m`%6|24UXdCqxG`1g0=2kOkd@#-Q&AR(P%P zMdTpvAy(jBM;jT2tUyk{D~~EF3{{U>K(nFk;T(JdLx-`&6l3PF0@xsI7Y>87!d2q7 z@J9GD{0|aKlAELyq`{in5#@A}YP&ZEYQ#XH-V)Gsvv6_^~14ao?j4lj=6k7|w9iW!UZJhhvUlPHq(FxfQ) zq?V>>q`%8dxgeZ1aw#H*HTOZjUjc35y<*QR6jwV-iRB~}tyPXS=-S45n}+?ysv9OZ zzqJ(K(rR1j$hs}xHG4PtzG(M&@2Lj@{VyISJQ5#z^W@U7{hV|l=i6Vte3RLV-yYuK+dKCw{z!laG%#N$3ABJM%p<0O zYA^skKqQbP%m$r-WBwLFh0ujLomRwONMWQ8vL5*f<`CmhgJ?Rm2f718hVj63W7)9r z*mpQXTq~XnpG|@xNg&xFjU_!Gq>|CVvs#J#1w}9=HDxE2J2egUAWZ`85!yYvKKcv> zJ4PYKJ*G+KW|m8=VQlv7TJY|}%00wyKDli~41a=UN19Bb{{JVSQ=?d&3H&&qviwE*<+| zre!9^?4cDF}{Txa*#Kx+jZQvyZXwvVVG@WYFu7)G)>HwaCho zPBE;pGpDX4cqED@Z6)`nTsY^LE}F4-ek7|Lj+#LpTmF}Vfuf?4z^j_2v}GSEI;v7@ ztn0YySFg7=Mcq_r{?^*qM(m*I?Cd&z=li|$-7G!jeOwO;25=992SX5MzsmCeV$vtN*Wk9q%cvGzm6 zlGZYQ`Nc~9M~79`)tR-DzwAEIeH!_EZe4SI`^$~5?i-97Prt=)N^Q<3ePg@o zht*Hi&(|HuI*eO3a z*sFk(4fq>KkN@xQ6^F(cm~$_2K14li9;XkV|9<@!M&f%8Nam8p00009a7bBm000XU z000XU0RWnu7ytkil}SWFRCodHT?u#;Rkr@KbUNvfeG_5`YY-wNfPp{+o{ADgGcxep z5O;8ydCWk3pWowCbe1RjK4lzy;4&jKqk}U-a1=+ud7z@;LLwlFC>S)v1jwFrI_XY2 
zop;WyuIf%_F~x?x|CCgE~7q5lBOq0>MKUdH^|7ARquk zTn+*P5DlHMG@8ELxbaVWHf?&T znHpfF&E_pZ&^rD;1;7qozi0Q$(`V)7{8<+kI>wdbHk%E>!9AN2eO+^{$KB)hHtVU6 z4;0@%KYw`%{kM%aj|)L>`1``u*EM%B_Ep|f_7iHT~t6&rZsneaT;XVt##n z3*O&%0=#!k4Gq$@x_XoAC663)d$?Wm=UXTrha?_sgD)BZa!4dhf)W5g$)o+5f!@!6p= z7>#E6lGpa0z~7?)*juclePn!mT$U>W2F?VqT7?}(LqHHhL#3+DoNXk5_#Pb{(lwSP zZ<=X|iSbjYeFoatR`H}3=!RdX3qeSTbc>FTPC&5WKoW3vT<}n4p!jve)Qtntp05&Y$`N~L&mauhNrjZlt#E%Rdnz*4RdA(~WsS0P~4Cker*^h9K3rID79 zAhx!)2_f*-6tD+E@|~5o_HbR*DQEm#fix64W;xPOIEsuwz3>ej`Mg}wlx+M?%^s;7 zt7<_1|D+24j|zb6{d*Duo)R*nQ%A&N`m}UK6}Gim#oV|jr-^I5{&3u6Y!z0&JjK=N zf~iA{0UNr_&1RH*=FkdaRxmwXu@ih1pW6b!KwO1@&&hNBf0 z=VYU~zns|bF>|Ig{pE8Oi&e4q8Sf>;d>$HnJ*g4^2E{@!BWJXj|MK2>t{)#4iCiKM z_X3_Wd3!22SVWGECF_5t9Wx1ebdVe1IRabo*K&Me+mp(08G`jsI~A7O*rz=A?*I(Ym_y4*ZBHj<`2EIL z@XCfeuGtW8G6RGFlFM<@CjE-OtU#5a;0kB%yXw(N%<3n(~sBeG(H{~)Y9EAyo%kT#Rg2j zpdOnacnjrpoDswQL%S&=xD)LJZ^c?^7~tUKxVSW2U-+UJ`I8c2{Q|sd4FLUcTr-0M zaqMa26wFKpz7U~s3AlNV^qhrHMbm9<`9gTLcVV_VCkYcW$bp+1aV?*4j`n;5NQvl5P$NHC1)DVqF ze?14Uta}S5dTDmrRR#Fn;tPAZ>c6M&cw`%zt17X5(`x+mXPZPMYENh$xHA{IIn#Q& z^ zG}YF_5*3HIuofIEDMeLB1jc8M#;C+D(d52>)gx`#@~i9ZqkAV_+e~x*&R~QFvHtHw zX=O8P?QIyJ9Ss9*B|&g;0hMp z3Alm-uHb+xn7Ts16&!E{`__2XkJh+p1UhOAxPk+&;D9SQ;0g}7f`^~4p*Mp`Hum_uHM8Ep9TllPO>m-^Cs zpVwg1bK6i`-w1z*2vDs7WXVaJJHyU=rk@Vk3#W^iKzdl}7D4^3u#E2B8*>%rGlt8u z5=Bg)^vMF>N2OW-kTeo=C=#;#Uwg6hiz=At%UPznGuZL$9uX3jIcgXzEoL+}ne7De zePX!NLIZ__1sfvpaY5fTR( zUH5HKQ7-^w@TCk-ATqS$+;^2Y-9Yg{p~En8>~LcE&~OCN2SO-y!qgT7qsff0kWR!$ z^D81!lBm$TfXL;}=Y9YJK+SF{!{d*=}ZDsk}pA}{0WdF3_)n|T5 zFNK7P(SF;zrP#jx9qieE2>F-K@p;gyHGt(@rI_!hEt)McpP}lbFn3v=a0JCAI=-Ld z^HfmLKw}#PgVO)j-n&3BpR3@}{)WrPilHHGIK3w22T8R6=u<`rMwjnBh~jFy5zt}A zN81hv!KkMXNNPDnh1mq7H@>uwma1@k3;2!wtQCOj+9tn%uigkWBw{AL|5)BofhX2& zA+XZ302%fCsUzg9CimQPVv`f;C6O8|{n>ML#6sZcPqU_9DPe!$!>g7coyleK6R!5=0O9Kit+4(r(6 ziv6QJ8-P(X4Sa3SakRGjFIv?a0G4_jZD3}d!^RD-cH>&cq5?d2jrKkeAp_;!Ur#;& z9W7Y4e9epUX=T6m-g%gom8l&2YDT>Vpn#D2K2TLOYC9;D1)wkDRn>N#8T3J_^Lk0W 
z2GEDo5^3Wxdgdfd9w7&WOIUcVywJ$#^9sz{H)rNATQUdN%*}+3f?}K#TL)6Cfb&`3 z%&Qjw3IaWJ_$1z;4dDsM&%YQ~=42pUgopbkSWmW!9lu+5e2Bl(Hp~!=)psw#l#5d7 z<59t4!9`Er%bRtn7l4p3WRMY9&31sf7Q0{HC$^-K>G(;07G_Pk5PmWfQbk{$>nD;C z$aX+;iw(co_@<~Qn^p+B=a%_MiWA>XQ&sn1{z<(6(1#*dufHEF>#Fe8m!&8!F2%dw zHlg}-8UFYJZG<8tdn)d^eHPNC3G-m$^7_440RBMV3*u1l6Q_-MckXuK!rmQ$k)#dR$sG z@^U71!@qOSF|2)@pOpG;Qm+AE#NKTmpy<6aRJ-8I$ex7UR10>zRSMI&Dx4*+aC%oe z$>ksZdHCl3@33X-u5M#~!F>8s>bP;(@Z1iZ5DQ57E(pe>^RmdH=2Rkv1Y;;r0f4a|kUQI?AO7tZbEf zJ(*E203jiWBR5FKRnt*$=_L9l06hS)bRb+XpPQ(|6)W>G1u?i-W6WoCJgUlRkTWYJ9y;~2lKhQP~5|72z2_#^8q&npdI^OKWZnM4)jd~lxFIKK%PKOm(9u+`!IG4P>PAtq9@Rh0JE!{0DuH! zkK`y|6ZXDM&ju*fYcM2?dkd?0BQd?AvKl9=rI$l^%Bzo%82pwp_ z3!t@d`N^j}MPee&>2}gr!FRvB)4o^~UCPYDMfxiI>b@c+MsVI_ZG?n%#SdILF9)yD z8iBv~&32h6$j=)^`5;_--)1F7aK==Pycf`JwRRcIa&EjD`NGhX@h9M+TM4YCmA;oJ zrO3=nv3MeD1n(z%`&dZj&7(JU#eehVv~0XE^yJ%^arZ3+;^s6cinJi_LRv*8MlRsh z{Xp^er2%-zvwii|iPQND<~cxwB;)S&_u$&{D%8_7aQMh%>8YP30yAe!z=De>;j*0J zN>6b7(K|VAAJyy)=J$-BZpMp7n5{I{+sN@1<}jm{UYm<6az zC)2KLBDKeY!To$ha&qG2BZqfAotPNM^BbQ^H8u4$*;5z(vZ|_v=c1LgH4&aJ8cR)s zhZ25=_;#ffO9d0sLd30K^&jiDoI6+3R|Htse-FYDw`bL=buUu;*yY6jR@v$9iMtOO z{Jm)a77X@ba%$f%7edh>l!!{woQDqvAyLn?wOiY*$B%zo zv32X~pEWczvH$rLZ56cfy6vr`0a$epDA9d}4E`PkfT>4BU?%e$j!CrfB%e1P1~}M{ zuQ8DZRRHLI>|J6XE5CNbPoY`u^Tv~L_DESt0J@K9biv&;RPgs@1TwMtC4bqg&n_U& z^RqpU@fmCZV8(Krcxd8Db|Y=v9v+%_sqO*ye5%7a4GH|cY5=AL^#T?U?(IAraOf}Z znfd(s?_l?Sx}{(;kM%5!ES&ry9?r8?uz9NYQ(Ynr1^j&q08@d8z|&jaWMSaE-1`Sx z2*lKk?$1KN8*2mJGw(g3`l+riN$dE3Q~;P7LCd=wx?7hW&8J3pu z_e%g|LIn2Oqk!C_wTCQ#s9zKa2tdEcq}@UR0njdQ`-LnZ0R1A9b_)drK)bx{7qWl= z^ovZ|Eff#{?eex?$N~b;FEVMjP(T2*%iDe-`+v|7m{y$1dn*6{002ovPDHLkV1lnB B5rhB$ diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt b/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt deleted file mode 100644 index 1f71b85..0000000 --- a/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt +++ /dev/null @@ -1,25 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, 
http://www.haskell.org/hoogle/ - - --- | Basic access to TensorFlow queues. --- --- Please see README.md -@package tensorflow-queue -@version 0.1.0.0 - - --- | Queues in TensorFlow graph. Very limited support for now. -module TensorFlow.Queue - --- | A queue carrying tuples. -data Queue (as :: [*]) - --- | Creates a new queue with the given capacity and shared name. -makeQueue :: (MonadBuild m, TensorTypes as) => Int64 -> ByteString -> m (Queue as) - --- | Adds the given values to the queue. -enqueue :: (MonadBuild m, TensorTypes as) => Queue as -> TensorList v as -> m ControlNode - --- | Retrieves the values from the queue. -dequeue :: (MonadBuild m, TensorTypes as) => Queue as -> m (TensorList Value as) diff --git a/docs/haddock/tensorflow-records-0.1.0.0/LICENSE b/docs/haddock/tensorflow-records-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-records-0.1.0.0/TensorFlow-Records.html b/docs/haddock/tensorflow-records-0.1.0.0/TensorFlow-Records.html index 63effe4..9553f95 100644 --- a/docs/haddock/tensorflow-records-0.1.0.0/TensorFlow-Records.html +++ b/docs/haddock/tensorflow-records-0.1.0.0/TensorFlow-Records.html @@ -1,6 +1,6 @@ -TensorFlow.Records

                              tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Records

                              Description

                              Encoder and decoder for the TensorFlow "TFRecords" format.

                              Records

                              putTFRecord :: ByteString -> Put

                              Put one TFRecord with the given contents.

                              getTFRecord :: Get ByteString

                              Parse one TFRecord.

                              getTFRecords :: Get [ByteString]

                              Parse many TFRecords as a list. Note you probably want streaming instead +

                              tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Records

                              Description

                              Encoder and decoder for the TensorFlow "TFRecords" format.

                              Records

                              putTFRecord :: ByteString -> Put Source #

                              Put one TFRecord with the given contents.

                              getTFRecord :: Get ByteString Source #

                              Parse one TFRecord.

                              getTFRecords :: Get [ByteString] Source #

                              Parse many TFRecords as a list. Note you probably want streaming instead as provided by the tensorflow-records-conduit package.

                              Implementation

                              These may be useful for encoding or decoding to types other than - ByteString that have their own Cereal codecs.

                              getTFRecordLength :: Get Word64

                              Get a length and verify its checksum.

                              getTFRecordData :: Word64 -> Get ByteString

                              Get a record payload and verify its checksum.

                              putTFRecordLength :: Word64 -> Put

                              Put a record length and its checksum.

                              putTFRecordData :: ByteString -> Put

                              Put a record payload and its checksum.

                              \ No newline at end of file + ByteString that have their own Cereal codecs.

                              getTFRecordLength :: Get Word64 Source #

                              Get a length and verify its checksum.

                              getTFRecordData :: Word64 -> Get ByteString Source #

                              Get a record payload and verify its checksum.

                              putTFRecordLength :: Word64 -> Put Source #

                              Put a record length and its checksum.

                              putTFRecordData :: ByteString -> Put Source #

                              Put a record payload and its checksum.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-records-0.1.0.0/doc-index.html index a149abc..4fa3c00 100644 --- a/docs/haddock/tensorflow-records-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-records-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format. (Index)

                              tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              Index

                              getTFRecordTensorFlow.Records
                              getTFRecordDataTensorFlow.Records
                              getTFRecordLengthTensorFlow.Records
                              getTFRecordsTensorFlow.Records
                              putTFRecordTensorFlow.Records
                              putTFRecordDataTensorFlow.Records
                              putTFRecordLengthTensorFlow.Records
                              \ No newline at end of file +

                              tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              Index

                              getTFRecordTensorFlow.Records
                              getTFRecordDataTensorFlow.Records
                              getTFRecordLengthTensorFlow.Records
                              getTFRecordsTensorFlow.Records
                              putTFRecordTensorFlow.Records
                              putTFRecordDataTensorFlow.Records
                              putTFRecordLengthTensorFlow.Records
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/frames.html b/docs/haddock/tensorflow-records-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-records-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-records-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-records-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-records-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-records-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-records-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-records-0.1.0.0/index-frames.html deleted file mode 100644 index 30dacc0..0000000 --- a/docs/haddock/tensorflow-records-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format. 
\ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/index.html b/docs/haddock/tensorflow-records-0.1.0.0/index.html index 88ed363..0f960a6 100644 --- a/docs/haddock/tensorflow-records-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-records-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              Encoder and decoder for the TensorFlow "TFRecords" format.

                              Modules

                              \ No newline at end of file +

                              tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

                              Encoder and decoder for the TensorFlow "TFRecords" format.

                              Modules

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/mini_TensorFlow-Records.html b/docs/haddock/tensorflow-records-0.1.0.0/mini_TensorFlow-Records.html index 0dda493..e435454 100644 --- a/docs/haddock/tensorflow-records-0.1.0.0/mini_TensorFlow-Records.html +++ b/docs/haddock/tensorflow-records-0.1.0.0/mini_TensorFlow-Records.html @@ -1,4 +1,4 @@ -TensorFlow.Records

                              TensorFlow.Records

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/ocean.css b/docs/haddock/tensorflow-records-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-records-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-records-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ 
-424,30 +435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-records-0.1.0.0/src/TensorFlow.CRC32C.html b/docs/haddock/tensorflow-records-0.1.0.0/src/TensorFlow.CRC32C.html new file mode 100644 index 0000000..d7f4a35 --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/src/TensorFlow.CRC32C.html @@ -0,0 +1,62 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +module TensorFlow.CRC32C
                              +  ( crc32c
                              +  , crc32cLBS
                              +  , crc32cUpdate
                              +  , crc32cMasked
                              +  , crc32cLBSMasked
                              +  , crc32cMask
                              +  , crc32cUnmask
                              +  ) where
                              +
                              +import Data.Bits (rotateL, rotateR)
                              +import qualified Data.ByteString as B
                              +import qualified Data.ByteString.Lazy as BL
                              +import Data.Digest.CRC32C (crc32c, crc32c_update)
                              +import Data.List (foldl')
                              +import Data.Word (Word32)
                              +
                              +-- | Compute the CRC32C checksum of the concatenation of the bytes checksummed
                              +-- by the given CRC32C value and the bytes in the given ByteString.
                              +crc32cUpdate :: Word32 -> B.ByteString -> Word32
                              +crc32cUpdate = crc32c_update
                              +
                              +-- | Compute the CRC32C checksum of the given bytes.
                              +crc32cLBS :: BL.ByteString -> Word32
                              +crc32cLBS = foldl' crc32cUpdate 0 . BL.toChunks
                              +
                              +-- | Scramble a CRC32C value so that the result can be safely stored in a
                              +-- bytestream that may itself be CRC'd.
                              +--
                              +-- This masking is the algorithm specified by TensorFlow's TFRecords format.
                              +crc32cMask :: Word32 -> Word32
                              +crc32cMask x = rotateR x 15 + maskDelta
                              +
                              +-- | Inverse of 'crc32cMask'.
                              +crc32cUnmask :: Word32 -> Word32
                              +crc32cUnmask x = rotateL (x - maskDelta) 15
                              +
                              +-- | Convenience function combining 'crc32c' and 'crc32cMask'.
                              +crc32cMasked :: B.ByteString -> Word32
                              +crc32cMasked = crc32cMask . crc32c
                              +
                              +-- | Convenience function combining 'crc32cLBS' and 'crc32cMask'.
                              +crc32cLBSMasked :: BL.ByteString -> Word32
                              +crc32cLBSMasked = crc32cMask . crc32cLBS
                              +
                              +maskDelta :: Word32
                              +maskDelta = 0xa282ead8
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/src/TensorFlow.Records.html b/docs/haddock/tensorflow-records-0.1.0.0/src/TensorFlow.Records.html new file mode 100644 index 0000000..d63e583 --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/src/TensorFlow.Records.html @@ -0,0 +1,135 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +-- | Encoder and decoder for the TensorFlow \"TFRecords\" format.
                              +
                              +{-# LANGUAGE Rank2Types #-}
                              +module TensorFlow.Records
                              +  (
                              +  -- * Records
                              +    putTFRecord
                              +  , getTFRecord
                              +  , getTFRecords
                              +
                              +  -- * Implementation
                              +
                              +  -- | These may be useful for encoding or decoding to types other than
                              +  -- 'ByteString' that have their own Cereal codecs.
                              +  , getTFRecordLength
                              +  , getTFRecordData
                              +  , putTFRecordLength
                              +  , putTFRecordData
                              +  ) where
                              +
                              +import Control.Exception (evaluate)
                              +import Control.Monad (when)
                              +import Data.ByteString.Unsafe (unsafePackCStringLen)
                              +import qualified Data.ByteString.Builder as B (Builder)
                              +import Data.ByteString.Builder.Extra (runBuilder, Next(..))
                              +import qualified Data.ByteString.Lazy as BL
                              +import Data.Serialize.Get
                              +  ( Get
                              +  , getBytes
                              +  , getWord32le
                              +  , getWord64le
                              +  , getLazyByteString
                              +  , isEmpty
                              +  , lookAhead
                              +  )
                              +import Data.Serialize
                              +  ( Put
                              +  , execPut
                              +  , putLazyByteString
                              +  , putWord32le
                              +  , putWord64le
                              +  )
                              +import Data.Word (Word8, Word64)
                              +import Foreign.Marshal.Alloc (allocaBytes)
                              +import Foreign.Ptr (Ptr, castPtr)
                              +import System.IO.Unsafe (unsafePerformIO)
                              +
                              +import TensorFlow.CRC32C (crc32cLBSMasked, crc32cUpdate, crc32cMask)
                              +
                              +-- | Parse one TFRecord.
                              +getTFRecord :: Get BL.ByteString
                              +getTFRecord = getTFRecordLength >>= getTFRecordData
                              +
                              +-- | Parse many TFRecords as a list.  Note you probably want streaming instead
                              +-- as provided by the tensorflow-records-conduit package.
                              +getTFRecords :: Get [BL.ByteString]
                              +getTFRecords = do
                              +  e <- isEmpty
                              +  if e then return [] else (:) <$> getTFRecord <*> getTFRecords
                              +
                              +getCheckMaskedCRC32C :: BL.ByteString -> Get ()
                              +getCheckMaskedCRC32C bs = do
                              +  wireCRC <- getWord32le
                              +  let maskedCRC = crc32cLBSMasked bs
                              +  when (maskedCRC /= wireCRC) $ fail $
                              +      "getCheckMaskedCRC32C: CRC mismatch, computed: " ++ show maskedCRC ++
                              +      ", expected: " ++ show wireCRC
                              +
                              +-- | Get a length and verify its checksum.
                              +getTFRecordLength :: Get Word64
                              +getTFRecordLength = do
                              +  buf <- lookAhead (getBytes 8)
                              +  getWord64le <* getCheckMaskedCRC32C (BL.fromStrict buf)
                              +
                              +-- | Get a record payload and verify its checksum.
                              +getTFRecordData :: Word64 -> Get BL.ByteString
                              +getTFRecordData len = if len > 0x7fffffffffffffff
                              +  then fail "getTFRecordData: Record size overflows Int64"
                              +  else do
                              +    bs <- getLazyByteString (fromIntegral len)
                              +    getCheckMaskedCRC32C bs
                              +    return bs
                              +
                              +putMaskedCRC32C :: BL.ByteString -> Put
                              +putMaskedCRC32C = putWord32le . crc32cLBSMasked
                              +
                              +-- Runs a Builder that's known to write a fixed number of bytes on an 'alloca'
                              +-- buffer, and runs the given IO action on the result.  Raises exceptions if
                              +-- the Builder yields ByteString chunks or attempts to write more bytes than
                              +-- expected.
                              +unsafeWithFixedWidthBuilder :: Int -> B.Builder -> (Ptr Word8 -> IO r) -> IO r
                              +unsafeWithFixedWidthBuilder n b act = allocaBytes n $ \ptr -> do
                              +  (_, signal) <- runBuilder b ptr n
                              +  case signal of
                              +    Done -> act ptr
                              +    More _ _ -> error "unsafeWithFixedWidthBuilder: Builder returned More."
                              +    Chunk _ _ -> error "unsafeWithFixedWidthBuilder: Builder returned Chunk."
                              +
                              +-- | Put a record length and its checksum.
                              +putTFRecordLength :: Word64 -> Put
                              +putTFRecordLength x =
                              +  let put = putWord64le x
                              +      len = 8
                              +      crc = crc32cMask $ unsafePerformIO $
                              +          -- Serialized Word64 is always 8 bytes, so we can go fast by using
                              +          -- alloca.
                              +          unsafeWithFixedWidthBuilder len (execPut put) $ \ptr -> do
                              +              str <- unsafePackCStringLen (castPtr ptr, len)
                              +              -- Force the result to ensure it's evaluated before freeing ptr.
                              +              evaluate $ crc32cUpdate 0 str
                              +  in  put *> putWord32le crc
                              +
                              +-- | Put a record payload and its checksum.
                              +putTFRecordData :: BL.ByteString -> Put
                              +putTFRecordData bs = putLazyByteString bs *> putMaskedCRC32C bs
                              +
                              +-- | Put one TFRecord with the given contents.
                              +putTFRecord :: BL.ByteString -> Put
                              +putTFRecord bs =
                              +  putTFRecordLength (fromIntegral $ BL.length bs) *> putTFRecordData bs
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-records-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-records-0.1.0.0/src/style.css b/docs/haddock/tensorflow-records-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-records-0.1.0.0/tensorflow-records.txt b/docs/haddock/tensorflow-records-0.1.0.0/tensorflow-records.txt deleted file mode 100644 index 6b34367..0000000 --- 
a/docs/haddock/tensorflow-records-0.1.0.0/tensorflow-records.txt +++ /dev/null @@ -1,35 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | Encoder and decoder for the TensorFlow \"TFRecords\" format. --- --- Encoder and decoder for the TensorFlow "TFRecords" format. -@package tensorflow-records -@version 0.1.0.0 - - --- | Encoder and decoder for the TensorFlow "TFRecords" format. -module TensorFlow.Records - --- | Put one TFRecord with the given contents. -putTFRecord :: ByteString -> Put - --- | Parse one TFRecord. -getTFRecord :: Get ByteString - --- | Parse many TFRecords as a list. Note you probably want streaming --- instead as provided by the tensorflow-records-conduit package. -getTFRecords :: Get [ByteString] - --- | Get a length and verify its checksum. -getTFRecordLength :: Get Word64 - --- | Get a record payload and verify its checksum. -getTFRecordData :: Word64 -> Get ByteString - --- | Put a record length and its checksum. -putTFRecordLength :: Word64 -> Put - --- | Put a record payload and its checksum. -putTFRecordData :: ByteString -> Put diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/LICENSE b/docs/haddock/tensorflow-records-conduit-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/TensorFlow-Records-Conduit.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/TensorFlow-Records-Conduit.html index c870056..16178ea 100644 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/TensorFlow-Records-Conduit.html +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/TensorFlow-Records-Conduit.html @@ -1,4 +1,4 @@ -TensorFlow.Records.Conduit

                              tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Records.Conduit

                              Description

                              Conduit wrappers for TensorFlow.Records.

                              Synopsis

                              Encode/Decode

                              encodeTFRecords :: Monad m => Conduit ByteString m ByteString

                              Encode TFRecords to a stream of bytes.

                              decodeTFRecords :: MonadThrow m => Conduit ByteString m ByteString

                              Decode TFRecords from a stream of bytes.

                              Source/Sink

                              sinkTFRecords :: MonadResource m => FilePath -> Consumer ByteString m ()

                              Write TFRecords to a file.

                              sourceTFRecords :: (MonadResource m, MonadThrow m) => FilePath -> Producer m ByteString

                              Read TFRecords from a file.

                              \ No newline at end of file +

                              tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Records.Conduit

                              Description

                              Conduit wrappers for TensorFlow.Records.

                              Synopsis

                              Encode/Decode

                              encodeTFRecords :: Monad m => Conduit ByteString m ByteString Source #

                              Encode TFRecords to a stream of bytes.

                              decodeTFRecords :: MonadThrow m => Conduit ByteString m ByteString Source #

                              Decode TFRecords from a stream of bytes.

                              Source/Sink

                              sinkTFRecords :: MonadResource m => FilePath -> Consumer ByteString m () Source #

                              Write TFRecords to a file.

                              sourceTFRecords :: (MonadResource m, MonadThrow m) => FilePath -> Producer m ByteString Source #

                              Read TFRecords from a file.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/doc-index.html index 329b86b..f0e6802 100644 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records. (Index)

                              tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              \ No newline at end of file +

                              tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/frames.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-records-conduit-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/index-frames.html deleted file mode 100644 index 758a8f3..0000000 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records. 
\ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/index.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/index.html index 1fc992b..0aae6b0 100644 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/index.html @@ -1,4 +1,4 @@ -tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              Conduit wrappers for TensorFlow.Records.

                              Modules

                              \ No newline at end of file +

                              tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

                              Conduit wrappers for TensorFlow.Records.

                              Modules

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/mini_TensorFlow-Records-Conduit.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/mini_TensorFlow-Records-Conduit.html index 5fc00bb..a33f12e 100644 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/mini_TensorFlow-Records-Conduit.html +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/mini_TensorFlow-Records-Conduit.html @@ -1,4 +1,4 @@ -TensorFlow.Records.Conduit

                              TensorFlow.Records.Conduit

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/ocean.css b/docs/haddock/tensorflow-records-conduit-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; 
margin: 0 -0.5em 0 0.5em; } @@ -424,30 +435,31 @@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/TensorFlow.Records.Conduit.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/TensorFlow.Records.Conduit.html new file mode 100644 index 0000000..75d1c0f --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/TensorFlow.Records.Conduit.html @@ -0,0 +1,54 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +-- | Conduit wrappers for TensorFlow.Records.
                              +
                              +{-# LANGUAGE Rank2Types #-}
                              +module TensorFlow.Records.Conduit
                              +  (
                              +  -- * Encode/Decode
                              +    encodeTFRecords
                              +  , decodeTFRecords
                              +
                              +  -- * Source/Sink
                              +  , sinkTFRecords
                              +  , sourceTFRecords
                              +  ) where
                              +
                              +import Control.Monad.Catch (MonadThrow)
                              +import Control.Monad.Trans.Resource (MonadResource)
                              +import qualified Data.ByteString as B
                              +import qualified Data.ByteString.Lazy as BL
                              +import Data.Conduit ((=$=), Conduit, Consumer, Producer)
                              +import Data.Conduit.Binary (sinkFile, sourceFile)
                              +import Data.Conduit.Cereal (conduitGet2, conduitPut)
                              +
                              +import TensorFlow.Records (getTFRecord, putTFRecord)
                              +
                              +-- | Decode TFRecords from a stream of bytes.
                              +decodeTFRecords :: MonadThrow m => Conduit B.ByteString m BL.ByteString
                              +decodeTFRecords = conduitGet2 getTFRecord
                              +
                              +-- | Read TFRecords from a file.
                              +sourceTFRecords :: (MonadResource m, MonadThrow m) => FilePath -> Producer m BL.ByteString
                              +sourceTFRecords path = sourceFile path =$= decodeTFRecords
                              +
                              +-- | Encode TFRecords to a stream of bytes.
                              +encodeTFRecords :: Monad m => Conduit BL.ByteString m B.ByteString
                              +encodeTFRecords = conduitPut putTFRecord
                              +
                              +-- | Write TFRecords to a file.
                              +sinkTFRecords :: (MonadResource m) => FilePath -> Consumer BL.ByteString m ()
                              +sinkTFRecords path = encodeTFRecords =$= sinkFile path
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/style.css b/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/tensorflow-records-conduit.txt 
b/docs/haddock/tensorflow-records-conduit-0.1.0.0/tensorflow-records-conduit.txt deleted file mode 100644 index 8344fa9..0000000 --- a/docs/haddock/tensorflow-records-conduit-0.1.0.0/tensorflow-records-conduit.txt +++ /dev/null @@ -1,25 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | Conduit wrappers for TensorFlow.Records. --- --- Conduit wrappers for TensorFlow.Records. -@package tensorflow-records-conduit -@version 0.1.0.0 - - --- | Conduit wrappers for TensorFlow.Records. -module TensorFlow.Records.Conduit - --- | Encode TFRecords to a stream of bytes. -encodeTFRecords :: Monad m => Conduit ByteString m ByteString - --- | Decode TFRecords from a stream of bytes. -decodeTFRecords :: MonadThrow m => Conduit ByteString m ByteString - --- | Write TFRecords to a file. -sinkTFRecords :: (MonadResource m) => FilePath -> Consumer ByteString m () - --- | Read TFRecords from a file. -sourceTFRecords :: (MonadResource m, MonadThrow m) => FilePath -> Producer m ByteString diff --git a/docs/haddock/tensorflow-test-0.1.0.0/LICENSE b/docs/haddock/tensorflow-test-0.1.0.0/LICENSE new file mode 100644 index 0000000..f89eb33 --- /dev/null +++ b/docs/haddock/tensorflow-test-0.1.0.0/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docs/haddock/tensorflow-test-0.1.0.0/TensorFlow-Test.html b/docs/haddock/tensorflow-test-0.1.0.0/TensorFlow-Test.html index 4205a18..f6f229f 100644 --- a/docs/haddock/tensorflow-test-0.1.0.0/TensorFlow-Test.html +++ b/docs/haddock/tensorflow-test-0.1.0.0/TensorFlow-Test.html @@ -1,5 +1,5 @@ -TensorFlow.Test

                              tensorflow-test-0.1.0.0: Some common functions for test suites.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Test

                              Synopsis

                              Documentation

                              assertAllClose :: Vector Float -> Vector Float -> Assertion

                              Compares that the vectors are element-by-element equal within the given - tolerance. Raises an assertion and prints some information if not.

                              \ No newline at end of file +

                              tensorflow-test-0.1.0.0: Some common functions for test suites.

                              Safe HaskellNone
                              LanguageHaskell2010

                              TensorFlow.Test

                              Synopsis

                              Documentation

                              assertAllClose :: Vector Float -> Vector Float -> Assertion Source #

                              Compares that the vectors are element-by-element equal within the given + tolerance. Raises an assertion and prints some information if not.

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-test-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-test-0.1.0.0/doc-index.html index 783013f..f8a1750 100644 --- a/docs/haddock/tensorflow-test-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-test-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ -tensorflow-test-0.1.0.0: Some common functions for test suites. (Index)

                              tensorflow-test-0.1.0.0: Some common functions for test suites.

                              Index

                              assertAllCloseTensorFlow.Test
                              \ No newline at end of file +

                              tensorflow-test-0.1.0.0: Some common functions for test suites.

                              Index

                              assertAllCloseTensorFlow.Test
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-test-0.1.0.0/frames.html b/docs/haddock/tensorflow-test-0.1.0.0/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/docs/haddock/tensorflow-test-0.1.0.0/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/docs/haddock/tensorflow-test-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-test-0.1.0.0/haddock-util.js index 9a6fccf..92d07d2 100644 --- a/docs/haddock/tensorflow-test-0.1.0.0/haddock-util.js +++ b/docs/haddock/tensorflow-test-0.1.0.0/haddock-util.js @@ -131,11 +131,11 @@ function perform_search(full) var text = document.getElementById("searchbox").value.toLowerCase(); if (text == last_search && !full) return; last_search = text; - + var table = document.getElementById("indexlist"); var status = document.getElementById("searchmsg"); var children = table.firstChild.childNodes; - + // first figure out the first node with the prefix var first = bisect(-1); var last = (first == -1 ? -1 : bisect(1)); @@ -166,7 +166,7 @@ function perform_search(full) status.innerHTML = ""; } - + function setclass(first, last, status) { for (var i = first; i <= last; i++) @@ -174,8 +174,8 @@ function perform_search(full) children[i].className = status; } } - - + + // do a binary search, treating 0 as ... // return either -1 (no 0's found) or location of most far match function bisect(dir) @@ -201,9 +201,9 @@ function perform_search(full) if (checkitem(i) == 0) return i; } return -1; - } - - + } + + // from an index, decide what the result is // 0 = match, -1 is lower, 1 is higher function checkitem(i) @@ -212,8 +212,8 @@ function perform_search(full) if (s == text) return 0; else return (s > text ? 
-1 : 1); } - - + + // from an index, get its string // this abstracts over alternates function getitem(i) @@ -229,7 +229,7 @@ function perform_search(full) } function setSynopsis(filename) { - if (parent.window.synopsis) { + if (parent.window.synopsis && parent.window.synopsis.location) { if (parent.window.synopsis.location.replace) { // In Firefox this avoids adding the change to the history. parent.window.synopsis.location.replace(filename); @@ -248,33 +248,6 @@ function addMenuItem(html) { } } -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - function styles() { var i, a, es = document.getElementsByTagName("link"), rs = []; for (i = 0; a = es[i]; i++) { @@ -337,7 +310,6 @@ function styleMenu(show) { function pageLoad() { addStyleMenu(); - adjustForFrames(); resetStyle(); restoreCollapsed(); } diff --git a/docs/haddock/tensorflow-test-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-test-0.1.0.0/index-frames.html deleted file mode 100644 index f24d881..0000000 --- a/docs/haddock/tensorflow-test-0.1.0.0/index-frames.html +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow-test-0.1.0.0: Some common functions for test suites. \ No newline at end of file diff --git a/docs/haddock/tensorflow-test-0.1.0.0/index.html b/docs/haddock/tensorflow-test-0.1.0.0/index.html index 027b767..97575a9 100644 --- a/docs/haddock/tensorflow-test-0.1.0.0/index.html +++ b/docs/haddock/tensorflow-test-0.1.0.0/index.html @@ -1,4 +1,5 @@ -tensorflow-test-0.1.0.0: Some common functions for test suites.

                              tensorflow-test-0.1.0.0: Some common functions for test suites.

                              tensorflow-test-0.1.0.0: Some common functions for test suites.

                              Some common functions for test suites.

                              Modules

                              \ No newline at end of file +

                              tensorflow-test-0.1.0.0: Some common functions for test suites.

                              tensorflow-test-0.1.0.0: Some common functions for test suites.

                              This package provides common utility functions for the +TensorFlow test suites.

                              Modules

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-test-0.1.0.0/mini_TensorFlow-Test.html b/docs/haddock/tensorflow-test-0.1.0.0/mini_TensorFlow-Test.html index 7e0a016..96ca076 100644 --- a/docs/haddock/tensorflow-test-0.1.0.0/mini_TensorFlow-Test.html +++ b/docs/haddock/tensorflow-test-0.1.0.0/mini_TensorFlow-Test.html @@ -1,4 +1,4 @@ -TensorFlow.Test

                              TensorFlow.Test

                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-test-0.1.0.0/ocean.css b/docs/haddock/tensorflow-test-0.1.0.0/ocean.css index 1110b40..e8e4d70 100644 --- a/docs/haddock/tensorflow-test-0.1.0.0/ocean.css +++ b/docs/haddock/tensorflow-test-0.1.0.0/ocean.css @@ -41,6 +41,9 @@ a[href]:link { color: rgb(196,69,29); } a[href]:visited { color: rgb(171,105,84); } a[href]:hover { text-decoration:underline; } +a[href].def:link, a[href].def:visited { color: black; } +a[href].def:hover { color: rgb(78, 98, 114); } + /* @end */ /* @group Fonts & Sizes */ @@ -143,15 +146,23 @@ ul.links li a { background-image: url(plus.gif); background-repeat: no-repeat; } -p.caption.collapser, -p.caption.expander { - background-position: 0 0.4em; -} .collapser, .expander { padding-left: 14px; margin-left: -14px; cursor: pointer; } +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} + +.instance.collapser, .instance.expander { + margin-left: 0px; + background-position: left center; + min-width: 9px; + min-height: 9px; +} + pre { padding: 0.25em; @@ -172,6 +183,9 @@ pre { .keyword { font-weight: normal; } .def { font-weight: bold; } +@media print { + #footer { display: none; } +} /* @end */ @@ -319,6 +333,8 @@ div#style-menu-holder { top: 10%; padding: 0; max-width: 75%; + /* Ensure that synopsis covers everything (including MathJAX markup) */ + z-index: 1; } #synopsis .caption { @@ -370,21 +386,16 @@ div#style-menu-holder { #interface h5 + div.top { margin-top: 1em; } -#interface p.src .link { +#interface .src .selflink, +#interface .src .link { float: right; color: #919191; - border-left: 1px solid #919191; background: #f0f0f0; padding: 0 0.5em 0.2em; - margin: 0 -0.5em 0 0.5em; + margin: 0 -0.5em 0 0; } - -#interface td.src .link { - float: right; - color: #919191; +#interface .src .selflink { border-left: 1px solid #919191; - background: #f0f0f0; - padding: 0 0.5em 0.2em; margin: 0 -0.5em 0 0.5em; } @@ -424,30 +435,31 
@@ div#style-menu-holder { visibility: hidden; } -.subs dl { +.subs ul { + list-style: none; + display: table; margin: 0; } -.subs dt { - float: left; - clear: left; - display: block; - margin: 1px 0; +.subs ul li { + display: table-row; } -.subs dd { - float: right; - width: 90%; - display: block; +.subs ul li dfn { + display: table-cell; + font-style: normal; + font-weight: bold; + margin: 1px 0; + white-space: nowrap; +} + +.subs ul li > .doc { + display: table-cell; padding-left: 0.5em; margin-bottom: 0.5em; } -.subs dd.empty { - display: none; -} - -.subs dd p { +.subs ul li > .doc p { margin: 0; } diff --git a/docs/haddock/tensorflow-test-0.1.0.0/src/TensorFlow.Test.html b/docs/haddock/tensorflow-test-0.1.0.0/src/TensorFlow.Test.html new file mode 100644 index 0000000..3110a00 --- /dev/null +++ b/docs/haddock/tensorflow-test-0.1.0.0/src/TensorFlow.Test.html @@ -0,0 +1,33 @@ +
                              -- Copyright 2016 TensorFlow authors.
                              +--
                              +-- Licensed under the Apache License, Version 2.0 (the "License");
                              +-- you may not use this file except in compliance with the License.
                              +-- You may obtain a copy of the License at
                              +--
                              +--     http://www.apache.org/licenses/LICENSE-2.0
                              +--
                              +-- Unless required by applicable law or agreed to in writing, software
                              +-- distributed under the License is distributed on an "AS IS" BASIS,
                              +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                              +-- See the License for the specific language governing permissions and
                              +-- limitations under the License.
                              +
                              +{-# LANGUAGE OverloadedStrings #-}
                              +
                              +module TensorFlow.Test
                              +    ( assertAllClose
                              +    ) where
                              +
                              +import qualified Data.Vector as V
                              +import Test.HUnit ((@?))
                              +import Test.HUnit.Lang (Assertion)
                              +-- | Compares that the vectors are element-by-element equal within the given
                              +-- tolerance. Raises an assertion and prints some information if not.
                              +assertAllClose :: V.Vector Float -> V.Vector Float -> Assertion
                              +assertAllClose xs ys = all (<= tol) (V.zipWith absDiff xs ys) @?
                              +    "Difference > tolerance: \nxs: " ++ show xs ++ "\nys: " ++ show ys
                              +        ++ "\ntolerance: " ++ show tol
                              +  where
                              +      absDiff x y = abs (x - y)
                              +      tol = 0.001 :: Float
                              +
                              \ No newline at end of file diff --git a/docs/haddock/tensorflow-test-0.1.0.0/src/highlight.js b/docs/haddock/tensorflow-test-0.1.0.0/src/highlight.js new file mode 100644 index 0000000..1e903bd --- /dev/null +++ b/docs/haddock/tensorflow-test-0.1.0.0/src/highlight.js @@ -0,0 +1,27 @@ + +var highlight = function (on) { + return function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + var that = links[i]; + + if (this.href != that.href) { + continue; + } + + if (on) { + that.classList.add("hover-highlight"); + } else { + that.classList.remove("hover-highlight"); + } + } + } +}; + +window.onload = function () { + var links = document.getElementsByTagName('a'); + for (var i = 0; i < links.length; i++) { + links[i].onmouseover = highlight(true); + links[i].onmouseout = highlight(false); + } +}; diff --git a/docs/haddock/tensorflow-test-0.1.0.0/src/style.css b/docs/haddock/tensorflow-test-0.1.0.0/src/style.css new file mode 100644 index 0000000..e83dc5e --- /dev/null +++ b/docs/haddock/tensorflow-test-0.1.0.0/src/style.css @@ -0,0 +1,55 @@ +body { + background-color: #fdf6e3; +} + +.hs-identifier { + color: #073642; +} + +.hs-identifier.hs-var { +} + +.hs-identifier.hs-type { + color: #5f5faf; +} + +.hs-keyword { + color: #af005f; +} + +.hs-string, .hs-char { + color: #cb4b16; +} + +.hs-number { + color: #268bd2; +} + +.hs-operator { + color: #d33682; +} + +.hs-glyph, .hs-special { + color: #dc322f; +} + +.hs-comment { + color: #8a8a8a; +} + +.hs-pragma { + color: #2aa198; +} + +.hs-cpp { + color: #859900; +} + +a:link, a:visited { + text-decoration: none; + border-bottom: 1px solid #eee8d5; +} + +a:hover, a.hover-highlight { + background-color: #eee8d5; +} diff --git a/docs/haddock/tensorflow-test-0.1.0.0/tensorflow-test.txt b/docs/haddock/tensorflow-test-0.1.0.0/tensorflow-test.txt deleted file mode 100644 index 98a38c2..0000000 --- 
a/docs/haddock/tensorflow-test-0.1.0.0/tensorflow-test.txt +++ /dev/null @@ -1,16 +0,0 @@ --- Hoogle documentation, generated by Haddock --- See Hoogle, http://www.haskell.org/hoogle/ - - --- | Some common functions for test suites. --- --- Some common functions for test suites. -@package tensorflow-test -@version 0.1.0.0 - -module TensorFlow.Test - --- | Compares that the vectors are element-by-element equal within the --- given tolerance. Raises an assertion and prints some information if --- not. -assertAllClose :: Vector Float -> Vector Float -> Assertion